diff --git a/.gitattributes b/.gitattributes
index 1cbadb948e72cbf2781ebb9dd4e9f43f6139e3b3..e84f34c52b2854aea415585933d7534780c1e0d6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -95,3 +95,6 @@ llmeval-env/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.
 llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 filter=lfs diff=lfs merge=lfs -text
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba68fc1f06d79b72aeb20370f0c62f829ca66329
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so
new file mode 100644
index 0000000000000000000000000000000000000000..77b1889eea4fd270e250230034c5c0e01138ad63
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95cec42ae770c1f2251d204b03e12d56fdb2e5561e4898c07b40382fe2474589
+size 28636664
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..432690b2f3a1dcaba80c0ee94563da68f22ef231
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec4ebbf44c2199e667abfa8d31ca22e104380e29
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py
b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 new file mode 100644 index 0000000000000000000000000000000000000000..9b5907d0edd02341a332c58f1d7a8480f0b212a7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c5639ce397a9f5b82cd277432d146370674358334a4ce0d33fa9a5ca090ac8a +size 6842248 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h new file mode 100644 index 0000000000000000000000000000000000000000..942f22dab54701141c9ce5e2a055c957824ca5b8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups.h @@ -0,0 +1,1690 @@ +/* + * Copyright 1993-2021 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. 
Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */
+
+#ifndef _COOPERATIVE_GROUPS_H_
+#define _COOPERATIVE_GROUPS_H_
+
+#if defined(__cplusplus) && defined(__CUDACC__)
+
+#include "cooperative_groups/details/info.h"
+#include "cooperative_groups/details/driver_abi.h"
+#include "cooperative_groups/details/helpers.h"
+#include "cooperative_groups/details/memory.h"
+
+#if defined(_CG_HAS_STL_ATOMICS)
+#include <cuda/atomic>
+#define _CG_THREAD_SCOPE(scope) _CG_STATIC_CONST_DECL cuda::thread_scope thread_scope = scope;
+#else
+#define _CG_THREAD_SCOPE(scope)
+#endif
+
+_CG_BEGIN_NAMESPACE
+
+namespace details {
+    _CG_CONST_DECL unsigned int coalesced_group_id = 1;
+    _CG_CONST_DECL unsigned int multi_grid_group_id = 2;
+    _CG_CONST_DECL unsigned int grid_group_id = 3;
+    _CG_CONST_DECL unsigned int thread_block_id = 4;
+    _CG_CONST_DECL unsigned int multi_tile_group_id = 5;
+    _CG_CONST_DECL unsigned int cluster_group_id = 6;
+}
+
+/**
+ * class thread_group;
+ *
+ * Generic thread group type, into which all groups are convertible.
+ * It acts as a container for all storage necessary for the derived groups,
+ * and will dispatch the API calls to the correct derived group. This means
+ * that all derived groups must implement the same interface as thread_group.
+ */
+class thread_group
+{
+protected:
+    struct group_data {
+        unsigned int _unused : 1;
+        unsigned int type : 7, : 0;
+    };
+
+    struct gg_data {
+        details::grid_workspace *gridWs;
+    };
+
+#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
+    struct mg_data {
+        unsigned long long _unused : 1;
+        unsigned long long type : 7;
+        unsigned long long handle : 56;
+        const details::multi_grid::multi_grid_functions *functions;
+    };
+#endif
+
+    struct tg_data {
+        unsigned int is_tiled : 1;
+        unsigned int type : 7;
+        unsigned int size : 24;
+        // packed to 4b
+        unsigned int metaGroupSize : 16;
+        unsigned int metaGroupRank : 16;
+        // packed to 8b
+        unsigned int mask;
+        // packed to 12b
+        unsigned int _res;
+    };
+
+    friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz);
+    friend class thread_block;
+
+    union __align__(8) {
+        group_data group;
+        tg_data coalesced;
+        gg_data grid;
+#if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
+        mg_data multi_grid;
+#endif
+    } _data;
+
+    _CG_QUALIFIER thread_group operator=(const thread_group& src);
+
+    _CG_QUALIFIER thread_group(unsigned int type) {
+        _data.group.type = type;
+        _data.group._unused = false;
+    }
+
+#ifdef _CG_CPP11_FEATURES
+    static_assert(sizeof(tg_data) <= 16, "Failed size check");
+    static_assert(sizeof(gg_data) <= 16, "Failed size check");
+# ifdef _CG_ABI_EXPERIMENTAL
+    static_assert(sizeof(mg_data) <= 16, "Failed size check");
+# endif
+#endif
+
+public:
+    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
+
+    _CG_QUALIFIER unsigned long long size() const;
+    _CG_QUALIFIER unsigned long long num_threads() const;
+    _CG_QUALIFIER unsigned long long thread_rank() const;
+    _CG_QUALIFIER void sync() const;
+    _CG_QUALIFIER unsigned int get_type() const {
+        return _data.group.type;
+    }
+
+};
+
+template <unsigned int TyId>
+struct
thread_group_base : public thread_group { + _CG_QUALIFIER thread_group_base() : thread_group(TyId) {} + _CG_STATIC_CONST_DECL unsigned int id = TyId; +}; + +#if defined(_CG_HAS_MULTI_GRID_GROUP) + +/** + * class multi_grid_group; + * + * Threads within this this group are guaranteed to be co-resident on the + * same system, on multiple devices within the same launched kernels. + * To use this group, the kernel must have been launched with + * cuLaunchCooperativeKernelMultiDevice (or the CUDA Runtime equivalent), + * and the device must support it (queryable device attribute). + * + * Constructed via this_multi_grid(); + */ + + +# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) +class multi_grid_group; + +// Multi grid group requires these functions to be templated to prevent ptxas from trying to use CG syscalls +template +__device__ _CG_DEPRECATED multi_grid_group this_multi_grid(); + +class multi_grid_group : public thread_group_base +{ +private: + template + _CG_QUALIFIER multi_grid_group() { + _data.multi_grid.functions = details::multi_grid::load_grid_intrinsics(); + _data.multi_grid.handle = _data.multi_grid.functions->get_intrinsic_handle(); + } + + friend multi_grid_group this_multi_grid(); + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER bool is_valid() const { + return (_data.multi_grid.handle != 0); + } + + _CG_QUALIFIER void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + _data.multi_grid.functions->sync(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->size(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _data.multi_grid.functions->thread_rank(_data.multi_grid.handle); + } + + _CG_QUALIFIER unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->grid_rank(_data.multi_grid.handle)); + } + + _CG_QUALIFIER unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (_data.multi_grid.functions->num_grids(_data.multi_grid.handle)); + } +}; +# else +class multi_grid_group +{ +private: + unsigned long long _handle; + unsigned int _size; + unsigned int _rank; + + friend _CG_QUALIFIER multi_grid_group this_multi_grid(); + + _CG_QUALIFIER multi_grid_group() { + _handle = details::multi_grid::get_intrinsic_handle(); + _size = details::multi_grid::size(_handle); + _rank = details::multi_grid::thread_rank(_handle); + } + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_system) + + _CG_QUALIFIER _CG_DEPRECATED bool is_valid() const { + return (_handle != 0); + } + + _CG_QUALIFIER _CG_DEPRECATED void sync() const { + if (!is_valid()) { + _CG_ABORT(); + } + details::multi_grid::sync(_handle); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long num_threads() const { + _CG_ASSERT(is_valid()); + return _size; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long size() const { + return num_threads(); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned long long thread_rank() const { + _CG_ASSERT(is_valid()); + return _rank; + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int grid_rank() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::grid_rank(_handle)); + } + + _CG_QUALIFIER _CG_DEPRECATED unsigned int num_grids() const { + _CG_ASSERT(is_valid()); + return (details::multi_grid::num_grids(_handle)); + } 
+};
+# endif
+
+/**
+ * multi_grid_group this_multi_grid()
+ *
+ * Constructs a multi_grid_group
+ */
+# if defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL)
+template
+__device__
+#else
+_CG_QUALIFIER
+# endif
+_CG_DEPRECATED
+multi_grid_group this_multi_grid()
+{
+    return multi_grid_group();
+}
+#endif
+
+/**
+ * class grid_group;
+ *
+ * Threads within this group are guaranteed to be co-resident on the
+ * same device within the same launched kernel. To use this group, the kernel
+ * must have been launched with cuLaunchCooperativeKernel (or the CUDA Runtime equivalent),
+ * and the device must support it (queryable device attribute).
+ *
+ * Constructed via this_grid();
+ */
+class grid_group : public thread_group_base<details::grid_group_id>
+{
+    _CG_STATIC_CONST_DECL unsigned int _group_id = details::grid_group_id;
+    friend _CG_QUALIFIER grid_group this_grid();
+
+private:
+    _CG_QUALIFIER grid_group(details::grid_workspace *gridWs) {
+        _data.grid.gridWs = gridWs;
+    }
+
+    public:
+    _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_device)
+
+    _CG_QUALIFIER bool is_valid() const {
+        return (_data.grid.gridWs != NULL);
+    }
+
+    _CG_QUALIFIER void sync() const {
+        if (!is_valid()) {
+            _CG_ABORT();
+        }
+        details::grid::sync(&_data.grid.gridWs->barrier);
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long size() {
+        return details::grid::size();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long thread_rank() {
+        return details::grid::thread_rank();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 group_dim() {
+        return details::grid::grid_dim();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_threads() {
+        return details::grid::num_threads();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 dim_blocks() {
+        return details::grid::dim_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_blocks() {
+        return details::grid::num_blocks();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 block_index() {
+        return details::grid::block_index();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long block_rank() {
+        return details::grid::block_rank();
+    }
+
+# if defined(_CG_HAS_CLUSTER_GROUP)
+    _CG_STATIC_QUALIFIER dim3 dim_clusters() {
+        return details::grid::dim_clusters();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
+        return details::grid::num_clusters();
+    }
+
+    _CG_STATIC_QUALIFIER dim3 cluster_index() {
+        return details::grid::cluster_index();
+    }
+
+    _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
+        return details::grid::cluster_rank();
+    }
+# endif
+};
+
+_CG_QUALIFIER grid_group this_grid() {
+    // Load a workspace from the driver
+    grid_group gg(details::get_grid_workspace());
+#ifdef _CG_DEBUG
+    // *all* threads must be available to synchronize
+    gg.sync();
+#endif // _CG_DEBUG
+    return gg;
+}
+
+#if defined(_CG_HAS_CLUSTER_GROUP)
+/**
+ * class cluster_group
+ *
+ * Every GPU kernel is executed by a grid of thread blocks. A grid can be evenly
+ * divided along all dimensions to form groups of blocks, each group of which is
+ * a block cluster. Clustered grids are subject to various restrictions and
+ * limitations. Primarily, a cluster consists of at most 8 blocks by default
+ * (although the user is allowed to opt in to non-standard sizes) and clustered
+ * grids are subject to additional occupancy limitations due to per-cluster
+ * hardware resource consumption. In exchange, a block cluster is guaranteed to
+ * be a cooperative group, with access to all cooperative group capabilities, as
+ * well as cluster specific capabilities and accelerations. A cluster_group
+ * represents a block cluster.
+ * + * Constructed via this_cluster_group(); + */ +class cluster_group : public thread_group_base +{ + // Friends + friend _CG_QUALIFIER cluster_group this_cluster(); + + // Disable constructor + _CG_QUALIFIER cluster_group() + { + } + + public: + //_CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_cluster) + + using arrival_token = struct {}; + + // Functionality exposed by the group + _CG_STATIC_QUALIFIER void sync() + { + return details::cluster::sync(); + } + + _CG_STATIC_QUALIFIER arrival_token barrier_arrive() + { + details::cluster::barrier_arrive(); + return arrival_token(); + } + + _CG_STATIC_QUALIFIER void barrier_wait() + { + return details::cluster::barrier_wait(); + } + + _CG_STATIC_QUALIFIER void barrier_wait(arrival_token&&) + { + return details::cluster::barrier_wait(); + } + + _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr) + { + return details::cluster::query_shared_rank(addr); + } + + template + _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank) + { + return details::cluster::map_shared_rank(addr, rank); + } + + _CG_STATIC_QUALIFIER dim3 block_index() + { + return details::cluster::block_index(); + } + + _CG_STATIC_QUALIFIER unsigned int block_rank() + { + return details::cluster::block_rank(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() + { + return details::cluster::thread_rank(); + } + + _CG_STATIC_QUALIFIER dim3 dim_blocks() + { + return details::cluster::dim_blocks(); + } + + _CG_STATIC_QUALIFIER unsigned int num_blocks() + { + return details::cluster::num_blocks(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() + { + return details::cluster::dim_threads(); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() + { + return details::cluster::num_threads(); + } + + // Legacy aliases + _CG_STATIC_QUALIFIER unsigned int size() + { + return num_threads(); + } +}; + +/* + * cluster_group this_cluster() + * + * Constructs a cluster_group + */ +_CG_QUALIFIER cluster_group this_cluster() +{ + cluster_group cg; +#ifdef _CG_DEBUG + cg.sync(); +#endif + return cg; +} +#endif + +#if defined(_CG_CPP11_FEATURES) +class thread_block; +template +_CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch); +#endif + +/** + * class thread_block + * + * Every GPU kernel is executed by a grid of thread blocks, and threads within + * each block are guaranteed to reside on the same streaming multiprocessor. + * A thread_block represents a thread block whose dimensions are not known until runtime. 
+ * + * Constructed via this_thread_block(); + */ +class thread_block : public thread_group_base +{ + // Friends + friend _CG_QUALIFIER thread_block this_thread_block(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz); + +#if defined(_CG_CPP11_FEATURES) + template + friend _CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch); + template + friend class __static_size_multi_warp_tile_base; + + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER thread_block(block_tile_memory& scratch) : + tile_memory(details::get_scratch_ptr(&scratch)) { +#ifdef _CG_DEBUG + if (num_threads() > MaxBlockSize) { + details::abort(); + } +#endif +#if !defined(_CG_HAS_RESERVED_SHARED) + tile_memory->init_barriers(thread_rank()); + sync(); +#endif + } +#endif + + // Disable constructor + _CG_QUALIFIER thread_block() +#if defined(_CG_CPP11_FEATURES) + : tile_memory(details::get_scratch_ptr(NULL)) +#endif + { } + + // Internal Use + _CG_QUALIFIER thread_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (thread_block()); + } + + unsigned int mask; + unsigned int base_offset = thread_rank() & (~(tilesz - 1)); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + + mask = (unsigned int)(-1) >> (32 - masklength); + mask <<= (details::laneid() & ~(tilesz - 1)); + thread_group tile = thread_group(details::coalesced_group_id); + tile._data.coalesced.mask = mask; + tile._data.coalesced.size = __popc(mask); + tile._data.coalesced.metaGroupSize = (details::cta::size() + tilesz - 1) / tilesz; + tile._data.coalesced.metaGroupRank = details::cta::thread_rank() / tilesz; + tile._data.coalesced.is_tiled = true; + return (tile); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::thread_block_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_STATIC_QUALIFIER void sync() { + details::cta::sync(); + } + + _CG_STATIC_QUALIFIER unsigned int size() { + return details::cta::size(); + } + + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return details::cta::thread_rank(); + } + + // Additional functionality exposed by the group + _CG_STATIC_QUALIFIER dim3 group_index() { + return details::cta::group_index(); + } + + _CG_STATIC_QUALIFIER dim3 thread_index() { + return details::cta::thread_index(); + } + + _CG_STATIC_QUALIFIER dim3 group_dim() { + return details::cta::block_dim(); + } + + _CG_STATIC_QUALIFIER dim3 dim_threads() { + return details::cta::dim_threads(); + } + + _CG_STATIC_QUALIFIER unsigned int num_threads() { + return details::cta::num_threads(); + } + +}; + +/** + * thread_block this_thread_block() + * + * Constructs a thread_block group + */ +_CG_QUALIFIER thread_block this_thread_block() +{ + return (thread_block()); +} + +#if defined(_CG_CPP11_FEATURES) +template +_CG_QUALIFIER thread_block this_thread_block(block_tile_memory& scratch) { + return (thread_block(scratch)); +} +#endif + +/** + * class coalesced_group + * + * A group representing the current set of converged threads in a warp. + * The size of the group is not guaranteed and it may return a group of + * only one thread (itself). + * + * This group exposes warp-synchronous builtins. 
+ * Constructed via coalesced_threads(); + */ +class coalesced_group : public thread_group_base +{ +private: + friend _CG_QUALIFIER coalesced_group coalesced_threads(); + friend _CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz); + friend _CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz); + friend class details::_coalesced_group_data_access; + + _CG_QUALIFIER unsigned int _packLanes(unsigned laneMask) const { + unsigned int member_pack = 0; + unsigned int member_rank = 0; + for (int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (laneMask & lane_bit) + member_pack |= 1 << member_rank; + member_rank++; + } + } + return (member_pack); + } + + // Internal Use + _CG_QUALIFIER coalesced_group _get_tiled_threads(unsigned int tilesz) const { + const bool pow2_tilesz = ((tilesz & (tilesz - 1)) == 0); + + // Invalid, immediately fail + if (tilesz == 0 || (tilesz > 32) || !pow2_tilesz) { + details::abort(); + return (coalesced_group(0)); + } + if (size() <= tilesz) { + return (*this); + } + + if ((_data.coalesced.is_tiled == true) && pow2_tilesz) { + unsigned int base_offset = (thread_rank() & (~(tilesz - 1))); + unsigned int masklength = min((unsigned int)size() - base_offset, tilesz); + unsigned int mask = (unsigned int)(-1) >> (32 - masklength); + + mask <<= (details::laneid() & ~(tilesz - 1)); + coalesced_group coalesced_tile = coalesced_group(mask); + coalesced_tile._data.coalesced.metaGroupSize = size() / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + coalesced_tile._data.coalesced.is_tiled = true; + return (coalesced_tile); + } + else if ((_data.coalesced.is_tiled == false) && pow2_tilesz) { + unsigned int mask = 0; + unsigned int member_rank = 0; + int seen_lanes = (thread_rank() / tilesz) * tilesz; + for (unsigned int bit_idx = 0; bit_idx < 32; bit_idx++) { + unsigned int lane_bit = _data.coalesced.mask & (1 << bit_idx); + if (lane_bit) { + if (seen_lanes <= 0 && member_rank < tilesz) { + mask |= lane_bit; + member_rank++; + } + seen_lanes--; + } + } + coalesced_group coalesced_tile = coalesced_group(mask); + // Override parent with the size of this group + coalesced_tile._data.coalesced.metaGroupSize = (size() + tilesz - 1) / tilesz; + coalesced_tile._data.coalesced.metaGroupRank = thread_rank() / tilesz; + return coalesced_tile; + } + else { + // None in _CG_VERSION 1000 + details::abort(); + } + + return (coalesced_group(0)); + } + + protected: + _CG_QUALIFIER coalesced_group(unsigned int mask) { + _data.coalesced.mask = mask; + _data.coalesced.size = __popc(mask); + _data.coalesced.metaGroupRank = 0; + _data.coalesced.metaGroupSize = 1; + _data.coalesced.is_tiled = false; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + + public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + _CG_QUALIFIER unsigned int num_threads() const { + return _data.coalesced.size; + } + + _CG_QUALIFIER unsigned int size() const { + return num_threads(); + } + + _CG_QUALIFIER unsigned int thread_rank() const { + return (__popc(_data.coalesced.mask & details::lanemask32_lt())); + } + + // Rank of this group in the upper level of the hierarchy + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + // Total num partitions created 
out of all CTAs when the group was created + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } + + _CG_QUALIFIER void sync() const { + __syncwarp(_data.coalesced.mask); + } + +#ifdef _CG_CPP11_FEATURES + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + unsigned int lane = (srcRank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? srcRank : __fns(_data.coalesced.mask, 0, (srcRank + 1)); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, int delta) const { + if (size() == 32) { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), 0xFFFFFFFF, delta, 32); + } + + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) + lane = details::laneid(); + + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), _data.coalesced.mask, lane, 32); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, unsigned int src_rank) const { + details::assert_if_not_arithmetic(); + unsigned int lane = (src_rank == 0) ? __ffs(_data.coalesced.mask) - 1 : + (size() == 32) ? src_rank : __fns(_data.coalesced.mask, 0, (src_rank + 1)); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_up_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned lane = __fns(_data.coalesced.mask, details::laneid(), -(delta + 1)); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, int delta) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__shfl_down_sync(0xFFFFFFFF, var, delta, 32)); + } + unsigned int lane = __fns(_data.coalesced.mask, details::laneid(), delta + 1); + if (lane >= 32) lane = details::laneid(); + return (__shfl_sync(_data.coalesced.mask, var, lane, 32)); + } +#endif + + _CG_QUALIFIER int any(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) != 0); + } + _CG_QUALIFIER int all(int predicate) const { + return (__ballot_sync(_data.coalesced.mask, predicate) == _data.coalesced.mask); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + if (size() == 32) { + return (__ballot_sync(0xFFFFFFFF, predicate)); + } + unsigned int lane_ballot = __ballot_sync(_data.coalesced.mask, predicate); + return (_packLanes(lane_ballot)); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_any_sync(0xFFFFFFFF, val)); + } + unsigned int lane_match = __match_any_sync(_data.coalesced.mask, val); + return (_packLanes(lane_match)); + } + + template + 
_CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + if (size() == 32) { + return (__match_all_sync(0xFFFFFFFF, val, &pred)); + } + unsigned int lane_match = __match_all_sync(_data.coalesced.mask, val, &pred); + return (_packLanes(lane_match)); + } + +#endif /* !_CG_HAS_MATCH_COLLECTIVE */ + +}; + +_CG_QUALIFIER coalesced_group coalesced_threads() +{ + return (coalesced_group(__activemask())); +} + +namespace details { + template struct verify_thread_block_tile_size; + template <> struct verify_thread_block_tile_size<32> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<16> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<8> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<4> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<2> { typedef void OK; }; + template <> struct verify_thread_block_tile_size<1> { typedef void OK; }; + +#ifdef _CG_CPP11_FEATURES + template + using _is_power_of_2 = _CG_STL_NAMESPACE::integral_constant; + + template + using _is_single_warp = _CG_STL_NAMESPACE::integral_constant; + template + using _is_multi_warp = + _CG_STL_NAMESPACE::integral_constant 32) && (Size <= 1024)>; + + template + using _is_valid_single_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_single_warp::value>; + template + using _is_valid_multi_warp_tile = + _CG_STL_NAMESPACE::integral_constant::value && _is_multi_warp::value>; +#else + template + struct _is_multi_warp { + static const bool value = false; + }; +#endif +} + +template +class __static_size_tile_base +{ +protected: + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + +public: + _CG_THREAD_SCOPE(cuda::thread_scope::thread_scope_block) + + // Rank of thread within tile + _CG_STATIC_QUALIFIER unsigned int thread_rank() { + return (details::cta::thread_rank() & (numThreads - 1)); + } + + // Number of threads within tile + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int num_threads() { + return numThreads; + } + + _CG_STATIC_CONSTEXPR_QUALIFIER unsigned int size() { + return num_threads(); + } +}; + +template +class __static_size_thread_block_tile_base : public __static_size_tile_base +{ + friend class details::_coalesced_group_data_access; + typedef details::tile::tile_helpers th; + +#ifdef _CG_CPP11_FEATURES + static_assert(details::_is_valid_single_warp_tile::value, "Size must be one of 1/2/4/8/16/32"); +#else + typedef typename details::verify_thread_block_tile_size::OK valid; +#endif + using __static_size_tile_base::numThreads; + _CG_STATIC_CONST_DECL unsigned int fullMask = 0xFFFFFFFF; + + protected: + _CG_STATIC_QUALIFIER unsigned int build_mask() { + unsigned int mask = fullMask; + if (numThreads != 32) { + // [0,31] representing the current active thread in the warp + unsigned int laneId = details::laneid(); + // shift mask according to the partition it belongs to + mask = th::tileMask << (laneId & ~(th::laneMask)); + } + return (mask); + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::coalesced_group_id; + + _CG_STATIC_QUALIFIER void sync() { + __syncwarp(build_mask()); + } + +#ifdef _CG_CPP11_FEATURES + // PTX supported collectives + template > + _CG_QUALIFIER TyRet shfl(TyElem&& elem, int srcRank) const { + return details::tile::shuffle_dispatch::shfl( + _CG_STL_NAMESPACE::forward(elem), build_mask(), srcRank, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int delta) const { + return 
details::tile::shuffle_dispatch::shfl_down( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int delta) const { + return details::tile::shuffle_dispatch::shfl_up( + _CG_STL_NAMESPACE::forward(elem), build_mask(), delta, numThreads); + } + + template > + _CG_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int laneMask) const { + return details::tile::shuffle_dispatch::shfl_xor( + _CG_STL_NAMESPACE::forward(elem), build_mask(), laneMask, numThreads); + } +#else + template + _CG_QUALIFIER TyIntegral shfl(TyIntegral var, int srcRank) const { + details::assert_if_not_arithmetic(); + return (__shfl_sync(build_mask(), var, srcRank, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_down(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_down_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_up(TyIntegral var, unsigned int delta) const { + details::assert_if_not_arithmetic(); + return (__shfl_up_sync(build_mask(), var, delta, numThreads)); + } + + template + _CG_QUALIFIER TyIntegral shfl_xor(TyIntegral var, unsigned int laneMask) const { + details::assert_if_not_arithmetic(); + return (__shfl_xor_sync(build_mask(), var, laneMask, numThreads)); + } +#endif //_CG_CPP11_FEATURES + + _CG_QUALIFIER int any(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot != 0); + } + _CG_QUALIFIER int all(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot == build_mask()); + } + _CG_QUALIFIER unsigned int ballot(int predicate) const { + unsigned int lane_ballot = __ballot_sync(build_mask(), predicate); + return (lane_ballot >> (details::laneid() & (~(th::laneMask)))); + } + +#ifdef _CG_HAS_MATCH_COLLECTIVE + template + _CG_QUALIFIER unsigned int match_any(TyIntegral val) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_any_sync(build_mask(), val); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } + + template + _CG_QUALIFIER unsigned int match_all(TyIntegral val, int &pred) const { + details::assert_if_not_arithmetic(); + unsigned int lane_match = __match_all_sync(build_mask(), val, &pred); + return (lane_match >> (details::laneid() & (~(th::laneMask)))); + } +#endif + +}; + +template +class __static_parent_thread_block_tile_base +{ +public: + // Rank of this group in the upper level of the hierarchy + _CG_STATIC_QUALIFIER unsigned int meta_group_rank() { + return ParentT::thread_rank() / Size; + } + + // Total num partitions created out of all CTAs when the group was created + _CG_STATIC_QUALIFIER unsigned int meta_group_size() { + return (ParentT::size() + Size - 1) / Size; + } +}; + +/** + * class thread_block_tile + * + * Statically-sized group type, representing one tile of a thread block. + * The only specializations currently supported are those with native + * hardware support (1/2/4/8/16/32) + * + * This group exposes warp-synchronous builtins. 
+ * Can only be constructed via tiled_partition(ParentT&) + */ + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + friend class details::_coalesced_group_data_access; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile() { }; + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int, unsigned int) { }; + + _CG_STATIC_QUALIFIER unsigned int get_mask() { + return __static_size_thread_block_tile_base::build_mask(); + } +}; + +template +class __single_warp_thread_block_tile : + public __static_size_thread_block_tile_base, + public thread_group_base +{ + _CG_STATIC_CONST_DECL unsigned int numThreads = Size; + + template friend class __single_warp_thread_block_tile; + friend class details::_coalesced_group_data_access; + + typedef __static_size_thread_block_tile_base staticSizeBaseT; + +protected: + _CG_QUALIFIER __single_warp_thread_block_tile(unsigned int meta_group_rank, unsigned int meta_group_size) { + _data.coalesced.mask = staticSizeBaseT::build_mask(); + _data.coalesced.size = numThreads; + _data.coalesced.metaGroupRank = meta_group_rank; + _data.coalesced.metaGroupSize = meta_group_size; + _data.coalesced.is_tiled = true; + } + + _CG_QUALIFIER unsigned int get_mask() const { + return (_data.coalesced.mask); + } + +public: + using staticSizeBaseT::sync; + using staticSizeBaseT::size; + using staticSizeBaseT::num_threads; + using staticSizeBaseT::thread_rank; + + _CG_QUALIFIER unsigned int meta_group_rank() const { + return _data.coalesced.metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return _data.coalesced.metaGroupSize; + } +}; + +/** + * Outer level API calls + * void sync(GroupT) - see .sync() + * void thread_rank(GroupT) - see .thread_rank() + * void group_size(GroupT) - see .size() + */ +template +_CG_QUALIFIER void sync(GroupT const &g) +{ + g.sync(); +} + +// TODO: Use a static dispatch to determine appropriate return type +// C++03 is stuck with unsigned long long for now +#ifdef _CG_CPP11_FEATURES +template +_CG_QUALIFIER auto thread_rank(GroupT const& g) -> decltype(g.thread_rank()) { + return g.thread_rank(); +} + + +template +_CG_QUALIFIER auto group_size(GroupT const &g) -> decltype(g.num_threads()) { + return g.num_threads(); +} +#else +template +_CG_QUALIFIER unsigned long long thread_rank(GroupT const& g) { + return static_cast(g.thread_rank()); +} + + +template +_CG_QUALIFIER unsigned long long group_size(GroupT const &g) { + return static_cast(g.num_threads()); +} +#endif + + +/** + * tiled_partition + * + * The tiled_partition(parent, tilesz) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)+tilesz-1)/tilesz) subgroups will + * be created where threads having identical k = (thread_rank(parent)/tilesz) + * will be members of the same subgroup. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to power-of-two sized subgorup instances of at most + * 32 threads. Only thread_block, thread_block_tile<>, and their subgroups can be + * tiled_partition() in _CG_VERSION 1000. 
+ */ +_CG_QUALIFIER thread_group tiled_partition(const thread_group& parent, unsigned int tilesz) +{ + if (parent.get_type() == details::coalesced_group_id) { + const coalesced_group *_cg = static_cast(&parent); + return _cg->_get_tiled_threads(tilesz); + } + else { + const thread_block *_tb = static_cast(&parent); + return _tb->_get_tiled_threads(tilesz); + } +} + +// Thread block type overload: returns a basic thread_group for now (may be specialized later) +_CG_QUALIFIER thread_group tiled_partition(const thread_block& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +// Coalesced group type overload: retains its ability to stay coalesced +_CG_QUALIFIER coalesced_group tiled_partition(const coalesced_group& parent, unsigned int tilesz) +{ + return (parent._get_tiled_threads(tilesz)); +} + +namespace details { + template + class internal_thread_block_tile : public __single_warp_thread_block_tile {}; + + template + _CG_QUALIFIER internal_thread_block_tile tiled_partition_internal() { + return internal_thread_block_tile(); + } + + template + _CG_QUALIFIER TyVal multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda) { + return group.template collectives_scheme(warp_lambda, inter_warp_lambda); + } + + template + _CG_QUALIFIER T* multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id) { + return group.template get_scratch_location(warp_id); + } + + template + _CG_QUALIFIER details::barrier_t* multi_warp_sync_location_getter(const GroupT& group) { + return group.get_sync_location(); + } + +} +/** + * tiled_partition + * + * The tiled_partition(parent) method is a collective operation that + * partitions the parent group into a one-dimensional, row-major, tiling of subgroups. + * + * A total of ((size(parent)/tilesz) subgroups will be created, + * therefore the parent group size must be evenly divisible by the tilesz. + * The allow parent groups are thread_block or thread_block_tile. + * + * The implementation may cause the calling thread to wait until all the members + * of the parent group have invoked the operation before resuming execution. + * + * Functionality is limited to native hardware sizes, 1/2/4/8/16/32. + * The size(parent) must be greater than the template Size parameter + * otherwise the results are undefined. 
+ */ + +#if defined(_CG_CPP11_FEATURES) +template +class __static_size_multi_warp_tile_base : public __static_size_tile_base +{ + static_assert(details::_is_valid_multi_warp_tile::value, "Size must be one of 64/128/256/512"); + + template + friend __device__ TyVal details::multi_warp_collectives_helper( + const GroupT& group, + WarpLambda warp_lambda, + InterWarpLambda inter_warp_lambda); + template + friend __device__ T* details::multi_warp_scratch_location_getter(const GroupT& group, unsigned int warp_id); + template + friend __device__ details::barrier_t* details::multi_warp_sync_location_getter(const GroupT& group); + template + friend class __static_size_multi_warp_tile_base; + using WarpType = details::internal_thread_block_tile<32, __static_size_multi_warp_tile_base>; + using ThisType = __static_size_multi_warp_tile_base; + _CG_STATIC_CONST_DECL int numWarps = Size / 32; + +protected: + details::multi_warp_scratch* const tile_memory; + + template + _CG_QUALIFIER __static_size_multi_warp_tile_base(const GroupT& g) : tile_memory(g.tile_memory) { +#if defined(_CG_HAS_RESERVED_SHARED) + details::sync_warps_reset(get_sync_location(), details::cta::thread_rank()); + g.sync(); +#endif + } + + +private: + _CG_QUALIFIER details::barrier_t* get_sync_location() const { + // Different group sizes use different barriers, all groups of a given size share one barrier. + unsigned int sync_id = details::log2(Size / 64); + return &tile_memory->barriers[sync_id]; + } + + template + _CG_QUALIFIER T* get_scratch_location(unsigned int warp_id) const { + unsigned int scratch_id = (details::cta::thread_rank() - thread_rank()) / 32 + warp_id; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER T* get_scratch_location() const { + unsigned int scratch_id = details::cta::thread_rank() / 32; + return reinterpret_cast(&tile_memory->communication_memory[scratch_id]); + } + + template + _CG_QUALIFIER TyVal shfl_impl(TyVal val, unsigned int src) const { + unsigned int src_warp = src / 32; + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + + // Get warp slot of the source threads warp. + TyVal* warp_scratch_location = get_scratch_location(src_warp); + + if (warp.meta_group_rank() == src_warp) { + warp.sync(); + // Put shuffled value into my warp slot and let my warp arrive at the barrier. + if (thread_rank() == src) { + *warp_scratch_location = val; + } + details::sync_warps_arrive(sync_location, details::cta::thread_rank(), numWarps); + TyVal result = *warp_scratch_location; + details::sync_warps_wait(sync_location, details::cta::thread_rank()); + return result; + } + else { + // Wait for the source warp to arrive on the barrier. 
+ details::sync_warps_wait_for_specific_warp(sync_location, + (details::cta::thread_rank() / 32 - warp.meta_group_rank() + src_warp)); + TyVal result = *warp_scratch_location; + details::sync_warps(sync_location, details::cta::thread_rank(), numWarps); + return result; + } + } + + template + _CG_QUALIFIER TyVal collectives_scheme(const WarpLambda& warp_lambda, const InterWarpLambda& inter_warp_lambda) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + auto warp = details::tiled_partition_internal<32, ThisType>(); + details::barrier_t* sync_location = get_sync_location(); + TyVal* warp_scratch_location = get_scratch_location(); + + warp_lambda(warp, warp_scratch_location); + + if (details::sync_warps_last_releases(sync_location, details::cta::thread_rank(), numWarps)) { + auto subwarp = details::tiled_partition_internal(); + if (subwarp.meta_group_rank() == 0) { + TyVal* thread_scratch_location = get_scratch_location(subwarp.thread_rank()); + inter_warp_lambda(subwarp, thread_scratch_location); + } + warp.sync(); + details::sync_warps_release(sync_location, warp.thread_rank() == 0, details::cta::thread_rank(), numWarps); + } + TyVal result = *warp_scratch_location; + return result; + } + +public: + _CG_STATIC_CONST_DECL unsigned int _group_id = details::multi_tile_group_id; + + using __static_size_tile_base::thread_rank; + + template + _CG_QUALIFIER TyVal shfl(TyVal val, unsigned int src) const { + static_assert(sizeof(TyVal) <= details::multi_warp_scratch::communication_size, + "Collectives with tiles larger than 32 threads are limited to types smaller then 8 bytes"); + return shfl_impl(val, src); + } + + _CG_QUALIFIER void sync() const { + details::sync_warps(get_sync_location(), details::cta::thread_rank(), numWarps); + } + + _CG_QUALIFIER int any(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __any_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __any_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } + + _CG_QUALIFIER int all(int predicate) const { + auto warp_lambda = [=] (WarpType& warp, int* warp_scratch_location) { + *warp_scratch_location = __all_sync(0xFFFFFFFF, predicate); + }; + auto inter_warp_lambda = + [] (details::internal_thread_block_tile& subwarp, int* thread_scratch_location) { + *thread_scratch_location = __all_sync(0xFFFFFFFFU >> (32 - numWarps), *thread_scratch_location); + }; + return collectives_scheme(warp_lambda, inter_warp_lambda); + } +}; + + +template +class __multi_warp_thread_block_tile : + public __static_size_multi_warp_tile_base, + public __static_parent_thread_block_tile_base +{ + typedef __static_parent_thread_block_tile_base staticParentBaseT; + typedef __static_size_multi_warp_tile_base staticTileBaseT; +protected: + _CG_QUALIFIER __multi_warp_thread_block_tile(const ParentT& g) : + __static_size_multi_warp_tile_base(g) {} +}; + +template +class __multi_warp_thread_block_tile : public __static_size_multi_warp_tile_base +{ + const unsigned int metaGroupRank; + const unsigned int metaGroupSize; + +protected: + template + _CG_QUALIFIER __multi_warp_thread_block_tile(const __multi_warp_thread_block_tile& g) : + 
__static_size_multi_warp_tile_base(g), metaGroupRank(g.meta_group_rank()), metaGroupSize(g.meta_group_size()) {} + +public: + _CG_QUALIFIER unsigned int meta_group_rank() const { + return metaGroupRank; + } + + _CG_QUALIFIER unsigned int meta_group_size() const { + return metaGroupSize; + } +}; +#endif + +template +class thread_block_tile; + +namespace details { + template + class thread_block_tile_impl; + + template + class thread_block_tile_impl: public __single_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const thread_block_tile_impl& g) : + __single_warp_thread_block_tile(g.meta_group_rank(), g.meta_group_size()) {} + + _CG_QUALIFIER thread_block_tile_impl(const thread_block& g) : + __single_warp_thread_block_tile() {} + }; + +#if defined(_CG_CPP11_FEATURES) + template + class thread_block_tile_impl : public __multi_warp_thread_block_tile + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) : + __multi_warp_thread_block_tile(g) {} + }; +#else + template + class thread_block_tile_impl + { + protected: + template + _CG_QUALIFIER thread_block_tile_impl(const GroupT& g) {} + }; +#endif +} + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + friend _CG_QUALIFIER thread_block_tile<1, void> this_thread(); + +protected: + _CG_QUALIFIER thread_block_tile(const ParentT& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + _CG_QUALIFIER operator thread_block_tile() const { + return thread_block_tile(*this); + } +}; + +template +class thread_block_tile : public details::thread_block_tile_impl::value> +{ + template + friend class thread_block_tile; + +protected: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} + +public: + template + _CG_QUALIFIER thread_block_tile(const thread_block_tile& g) : + details::thread_block_tile_impl::value>(g) {} +}; + +namespace details { + template + struct tiled_partition_impl; + + template + struct tiled_partition_impl : public thread_block_tile { + _CG_QUALIFIER tiled_partition_impl(const thread_block& g) : + thread_block_tile(g) {} + }; + + // ParentT = static thread_block_tile specialization + template + struct tiled_partition_impl > : + public thread_block_tile > { +#ifdef _CG_CPP11_FEATURES + static_assert(Size < ParentSize, "Tile size bigger or equal to the parent group size"); +#endif + _CG_QUALIFIER tiled_partition_impl(const thread_block_tile& g) : + thread_block_tile >(g) {} + }; + +} + +template +_CG_QUALIFIER thread_block_tile tiled_partition(const ParentT& g) +{ + return details::tiled_partition_impl(g); +} + +/** + * thread_group this_thread() + * + * Constructs a generic thread_group containing only the calling thread + */ +_CG_QUALIFIER thread_block_tile<1, void> this_thread() +{ + // Make thread_block_tile<1, thread_block> parent of the returned group, so it will have its + // meta group rank and size set to 0 and 1 respectively. + return thread_block_tile<1, thread_block_tile<1, thread_block> >(this_thread_block()); +} + +/** + * .sync() + * + * Executes a barrier across the group + * + * Implements both a compiler fence and an architectural fence to prevent, + * memory reordering around the barrier. 
+ */ +_CG_QUALIFIER void thread_group::sync() const +{ + switch (_data.group.type) { + case details::coalesced_group_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::thread_block_id: + cooperative_groups::sync(*static_cast(this)); + break; + case details::grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + cooperative_groups::sync(*static_cast(this)); + break; +#endif + default: + break; + } +} + +/** + * .size() + * + * Returns the total number of threads in the group. + */ +_CG_QUALIFIER unsigned long long thread_group::size() const +{ + unsigned long long size = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::thread_block_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; + case details::grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + size = cooperative_groups::group_size(*static_cast(this)); + break; +#endif + default: + break; + } + return size; +} + +/** + * .thread_rank() + * + * Returns the linearized rank of the calling thread along the interval [0, size()). + */ +_CG_QUALIFIER unsigned long long thread_group::thread_rank() const +{ + unsigned long long rank = 0; + switch (_data.group.type) { + case details::coalesced_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::thread_block_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; + case details::grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#if defined(_CG_HAS_MULTI_GRID_GROUP) && defined(_CG_CPP11_FEATURES) && defined(_CG_ABI_EXPERIMENTAL) + case details::multi_grid_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif +#if defined(_CG_HAS_CLUSTER_GROUP) + case details::cluster_group_id: + rank = cooperative_groups::thread_rank(*static_cast(this)); + break; +#endif + default: + break; + } + return rank; +} + +_CG_END_NAMESPACE + +#include +#if (!defined(_MSC_VER) || defined(_WIN64)) +# include +#endif + +# endif /* ! (__cplusplus, __CUDACC__) */ + +#endif /* !_COOPERATIVE_GROUPS_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h new file mode 100644 index 0000000000000000000000000000000000000000..7b167111b0b387a5279da6749d946560e1c42c1b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuComplex.h @@ -0,0 +1,348 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(CU_COMPLEX_H_) +#define CU_COMPLEX_H_ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#endif +#endif + +/* When trying to include C header file in C++ Code extern "C" is required + * But the Standard QNX headers already have ifdef extern in them when compiling C++ Code + * extern "C" cannot be nested + * Hence keep the header out of extern "C" block + */ + +#if !defined(__CUDACC__) +#include /* import fabsf, sqrt */ +#endif /* !defined(__CUDACC__) */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#include "vector_types.h" + +typedef float2 cuFloatComplex; + +__host__ __device__ static __inline__ float cuCrealf (cuFloatComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ float cuCimagf (cuFloatComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex + (float r, float i) +{ + cuFloatComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuFloatComplex cuConjf (cuFloatComplex x) +{ + return make_cuFloatComplex (cuCrealf(x), -cuCimagf(x)); +} +__host__ __device__ static __inline__ cuFloatComplex cuCaddf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) + cuCrealf(y), + cuCimagf(x) + cuCimagf(y)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuCsubf (cuFloatComplex x, + cuFloatComplex y) +{ + return make_cuFloatComplex (cuCrealf(x) - cuCrealf(y), + cuCimagf(x) - cuCimagf(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCmulf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex prod; + prod = make_cuFloatComplex ((cuCrealf(x) * cuCrealf(y)) - + (cuCimagf(x) * cuCimagf(y)), + (cuCrealf(x) * cuCimagf(y)) + + (cuCimagf(x) * cuCrealf(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuFloatComplex cuCdivf (cuFloatComplex x, + cuFloatComplex y) +{ + cuFloatComplex quot; + float s = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y)); + float oos = 1.0f / s; + float ars = cuCrealf(x) * oos; + float ais = cuCimagf(x) * oos; + float brs = cuCrealf(y) * oos; + float bis = cuCimagf(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0f / s; + quot = make_cuFloatComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* + * We would like to call hypotf(), but it's not available on all platforms. + * This discrete implementation guards against intermediate underflow and + * overflow by scaling. Otherwise we would lose half the exponent range. + * There are various ways of doing guarded computation. For now chose the + * simplest and fastest solution, however this may suffer from inaccuracies + * if sqrt and division are not IEEE compliant. 
+ */ +__host__ __device__ static __inline__ float cuCabsf (cuFloatComplex x) +{ + float a = cuCrealf(x); + float b = cuCimagf(x); + float v, w, t; + a = fabsf(a); + b = fabsf(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0f + t * t; + t = v * sqrtf(t); + if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) { + t = v + w; + } + return t; +} + +/* Double precision */ +typedef double2 cuDoubleComplex; + +__host__ __device__ static __inline__ double cuCreal (cuDoubleComplex x) +{ + return x.x; +} + +__host__ __device__ static __inline__ double cuCimag (cuDoubleComplex x) +{ + return x.y; +} + +__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex + (double r, double i) +{ + cuDoubleComplex res; + res.x = r; + res.y = i; + return res; +} + +__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x) +{ + return make_cuDoubleComplex (cuCreal(x), -cuCimag(x)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) + cuCreal(y), + cuCimag(x) + cuCimag(y)); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x, + cuDoubleComplex y) +{ + return make_cuDoubleComplex (cuCreal(x) - cuCreal(y), + cuCimag(x) - cuCimag(y)); +} + +/* This implementation could suffer from intermediate overflow even though + * the final result would be in range. However, various implementations do + * not guard against this (presumably to avoid losing performance), so we + * don't do it either to stay competitive. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex prod; + prod = make_cuDoubleComplex ((cuCreal(x) * cuCreal(y)) - + (cuCimag(x) * cuCimag(y)), + (cuCreal(x) * cuCimag(y)) + + (cuCimag(x) * cuCreal(y))); + return prod; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Such guarded implementations are usually the default for + * complex library implementations, with some also offering an unguarded, + * faster version. + */ +__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x, + cuDoubleComplex y) +{ + cuDoubleComplex quot; + double s = (fabs(cuCreal(y))) + (fabs(cuCimag(y))); + double oos = 1.0 / s; + double ars = cuCreal(x) * oos; + double ais = cuCimag(x) * oos; + double brs = cuCreal(y) * oos; + double bis = cuCimag(y) * oos; + s = (brs * brs) + (bis * bis); + oos = 1.0 / s; + quot = make_cuDoubleComplex (((ars * brs) + (ais * bis)) * oos, + ((ais * brs) - (ars * bis)) * oos); + return quot; +} + +/* This implementation guards against intermediate underflow and overflow + * by scaling. Otherwise we would lose half the exponent range. There are + * various ways of doing guarded computation. For now chose the simplest + * and fastest solution, however this may suffer from inaccuracies if sqrt + * and division are not IEEE compliant. 
+ */ +__host__ __device__ static __inline__ double cuCabs (cuDoubleComplex x) +{ + double a = cuCreal(x); + double b = cuCimag(x); + double v, w, t; + a = fabs(a); + b = fabs(b); + if (a > b) { + v = a; + w = b; + } else { + v = b; + w = a; + } + t = w / v; + t = 1.0 + t * t; + t = v * sqrt(t); + if ((v == 0.0) || + (v > 1.79769313486231570e+308) || (w > 1.79769313486231570e+308)) { + t = v + w; + } + return t; +} + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +/* aliases */ +typedef cuFloatComplex cuComplex; +__host__ __device__ static __inline__ cuComplex make_cuComplex (float x, + float y) +{ + return make_cuFloatComplex (x, y); +} + +/* float-to-double promotion */ +__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble + (cuFloatComplex c) +{ + return make_cuDoubleComplex ((double)cuCrealf(c), (double)cuCimagf(c)); +} + +__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat +(cuDoubleComplex c) +{ + return make_cuFloatComplex ((float)cuCreal(c), (float)cuCimag(c)); +} + + +__host__ __device__ static __inline__ cuComplex cuCfmaf( cuComplex x, cuComplex y, cuComplex d) +{ + float real_res; + float imag_res; + + real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d); + imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d); + + real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res; + imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res; + + return make_cuComplex(real_res, imag_res); +} + +__host__ __device__ static __inline__ cuDoubleComplex cuCfma( cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d) +{ + double real_res; + double imag_res; + + real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d); + imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d); + + real_res = -(cuCimag(x) * cuCimag(y)) + real_res; + imag_res = (cuCimag(x) * cuCreal(y)) + imag_res; + + return make_cuDoubleComplex(real_res, imag_res); +} + +#endif /* !defined(CU_COMPLEX_H_) */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..fc733fdeacf706b0180bd0f9e915ef50eae1aba3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda.h @@ -0,0 +1,22119 @@ +/* + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
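A small host-side sketch of the cuComplex.h helpers defined above (multiplication, the scaled division, and the guarded magnitude); it only assumes the CUDA headers are on the include path, and the values in the comments follow from (3+4i)(1-2i) = 11-2i, (3+4i)/(1-2i) = -1+2i and |3+4i| = 5.

#include <stdio.h>
#include <cuComplex.h>

int main(void)
{
    cuFloatComplex a = make_cuFloatComplex(3.0f, 4.0f);    /* 3 + 4i */
    cuFloatComplex b = make_cuFloatComplex(1.0f, -2.0f);   /* 1 - 2i */

    cuFloatComplex p = cuCmulf(a, b);   /* 11 - 2i */
    cuFloatComplex q = cuCdivf(a, b);   /* -1 + 2i, scaled to avoid overflow */
    float          m = cuCabsf(a);      /* 5.0 */

    printf("p = %f%+fi\n", cuCrealf(p), cuCimagf(p));
    printf("q = %f%+fi\n", cuCrealf(q), cuCimagf(q));
    printf("|a| = %f\n", m);
    return 0;
}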
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef __cuda_cuda_h__ +#define __cuda_cuda_h__ + + + + +#include +#ifdef _MSC_VER +typedef unsigned __int32 cuuint32_t; +typedef unsigned __int64 cuuint64_t; +#else +#include +typedef uint32_t cuuint32_t; +typedef uint64_t cuuint64_t; +#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif + +#if defined(CUDA_FORCE_API_VERSION) +#error "CUDA_FORCE_API_VERSION is no longer supported." 
+#endif + +#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define __CUDA_API_PER_THREAD_DEFAULT_STREAM + #define __CUDA_API_PTDS(api) api ## _ptds + #define __CUDA_API_PTSZ(api) api ## _ptsz +#else + #define __CUDA_API_PTDS(api) api + #define __CUDA_API_PTSZ(api) api +#endif + +#define cuDeviceTotalMem cuDeviceTotalMem_v2 +#define cuCtxCreate cuCtxCreate_v2 +#define cuCtxCreate_v3 cuCtxCreate_v3 +#define cuModuleGetGlobal cuModuleGetGlobal_v2 +#define cuMemGetInfo cuMemGetInfo_v2 +#define cuMemAlloc cuMemAlloc_v2 +#define cuMemAllocPitch cuMemAllocPitch_v2 +#define cuMemFree cuMemFree_v2 +#define cuMemGetAddressRange cuMemGetAddressRange_v2 +#define cuMemAllocHost cuMemAllocHost_v2 +#define cuMemHostGetDevicePointer cuMemHostGetDevicePointer_v2 +#define cuMemcpyHtoD __CUDA_API_PTDS(cuMemcpyHtoD_v2) +#define cuMemcpyDtoH __CUDA_API_PTDS(cuMemcpyDtoH_v2) +#define cuMemcpyDtoD __CUDA_API_PTDS(cuMemcpyDtoD_v2) +#define cuMemcpyDtoA __CUDA_API_PTDS(cuMemcpyDtoA_v2) +#define cuMemcpyAtoD __CUDA_API_PTDS(cuMemcpyAtoD_v2) +#define cuMemcpyHtoA __CUDA_API_PTDS(cuMemcpyHtoA_v2) +#define cuMemcpyAtoH __CUDA_API_PTDS(cuMemcpyAtoH_v2) +#define cuMemcpyAtoA __CUDA_API_PTDS(cuMemcpyAtoA_v2) +#define cuMemcpyHtoAAsync __CUDA_API_PTSZ(cuMemcpyHtoAAsync_v2) +#define cuMemcpyAtoHAsync __CUDA_API_PTSZ(cuMemcpyAtoHAsync_v2) +#define cuMemcpy2D __CUDA_API_PTDS(cuMemcpy2D_v2) +#define cuMemcpy2DUnaligned __CUDA_API_PTDS(cuMemcpy2DUnaligned_v2) +#define cuMemcpy3D __CUDA_API_PTDS(cuMemcpy3D_v2) +#define cuMemcpyHtoDAsync __CUDA_API_PTSZ(cuMemcpyHtoDAsync_v2) +#define cuMemcpyDtoHAsync __CUDA_API_PTSZ(cuMemcpyDtoHAsync_v2) +#define cuMemcpyDtoDAsync __CUDA_API_PTSZ(cuMemcpyDtoDAsync_v2) +#define cuMemcpy2DAsync __CUDA_API_PTSZ(cuMemcpy2DAsync_v2) +#define cuMemcpy3DAsync __CUDA_API_PTSZ(cuMemcpy3DAsync_v2) +#define cuMemsetD8 __CUDA_API_PTDS(cuMemsetD8_v2) +#define cuMemsetD16 __CUDA_API_PTDS(cuMemsetD16_v2) +#define cuMemsetD32 __CUDA_API_PTDS(cuMemsetD32_v2) +#define cuMemsetD2D8 __CUDA_API_PTDS(cuMemsetD2D8_v2) +#define cuMemsetD2D16 __CUDA_API_PTDS(cuMemsetD2D16_v2) +#define cuMemsetD2D32 __CUDA_API_PTDS(cuMemsetD2D32_v2) +#define cuArrayCreate cuArrayCreate_v2 +#define cuArrayGetDescriptor cuArrayGetDescriptor_v2 +#define cuArray3DCreate cuArray3DCreate_v2 +#define cuArray3DGetDescriptor cuArray3DGetDescriptor_v2 +#define cuTexRefSetAddress cuTexRefSetAddress_v2 +#define cuTexRefGetAddress cuTexRefGetAddress_v2 +#define cuGraphicsResourceGetMappedPointer cuGraphicsResourceGetMappedPointer_v2 +#define cuCtxDestroy cuCtxDestroy_v2 +#define cuCtxPopCurrent cuCtxPopCurrent_v2 +#define cuCtxPushCurrent cuCtxPushCurrent_v2 +#define cuStreamDestroy cuStreamDestroy_v2 +#define cuEventDestroy cuEventDestroy_v2 +#define cuTexRefSetAddress2D cuTexRefSetAddress2D_v3 +#define cuLinkCreate cuLinkCreate_v2 +#define cuLinkAddData cuLinkAddData_v2 +#define cuLinkAddFile cuLinkAddFile_v2 +#define cuMemHostRegister cuMemHostRegister_v2 +#define cuGraphicsResourceSetMapFlags cuGraphicsResourceSetMapFlags_v2 +#define cuStreamBeginCapture __CUDA_API_PTSZ(cuStreamBeginCapture_v2) +#define cuDevicePrimaryCtxRelease cuDevicePrimaryCtxRelease_v2 +#define cuDevicePrimaryCtxReset cuDevicePrimaryCtxReset_v2 +#define cuDevicePrimaryCtxSetFlags cuDevicePrimaryCtxSetFlags_v2 +#define cuDeviceGetUuid_v2 cuDeviceGetUuid_v2 +#define cuIpcOpenMemHandle cuIpcOpenMemHandle_v2 + +#define cuGraphInstantiate cuGraphInstantiateWithFlags + +#define cuGraphExecUpdate cuGraphExecUpdate_v2 +#define 
cuGetProcAddress cuGetProcAddress_v2 +#define cuGraphAddKernelNode cuGraphAddKernelNode_v2 +#define cuGraphKernelNodeGetParams cuGraphKernelNodeGetParams_v2 +#define cuGraphKernelNodeSetParams cuGraphKernelNodeSetParams_v2 +#define cuGraphExecKernelNodeSetParams cuGraphExecKernelNodeSetParams_v2 + +#define cuStreamWriteValue32 __CUDA_API_PTSZ(cuStreamWriteValue32_v2) +#define cuStreamWaitValue32 __CUDA_API_PTSZ(cuStreamWaitValue32_v2) +#define cuStreamWriteValue64 __CUDA_API_PTSZ(cuStreamWriteValue64_v2) +#define cuStreamWaitValue64 __CUDA_API_PTSZ(cuStreamWaitValue64_v2) +#define cuStreamBatchMemOp __CUDA_API_PTSZ(cuStreamBatchMemOp_v2) +#define cuStreamGetCaptureInfo __CUDA_API_PTSZ(cuStreamGetCaptureInfo_v2) +#define cuStreamGetCaptureInfo_v2 __CUDA_API_PTSZ(cuStreamGetCaptureInfo_v2) + +#if defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) + #define cuMemcpy __CUDA_API_PTDS(cuMemcpy) + #define cuMemcpyAsync __CUDA_API_PTSZ(cuMemcpyAsync) + #define cuMemcpyPeer __CUDA_API_PTDS(cuMemcpyPeer) + #define cuMemcpyPeerAsync __CUDA_API_PTSZ(cuMemcpyPeerAsync) + #define cuMemcpy3DPeer __CUDA_API_PTDS(cuMemcpy3DPeer) + #define cuMemcpy3DPeerAsync __CUDA_API_PTSZ(cuMemcpy3DPeerAsync) + #define cuMemPrefetchAsync __CUDA_API_PTSZ(cuMemPrefetchAsync) + + #define cuMemsetD8Async __CUDA_API_PTSZ(cuMemsetD8Async) + #define cuMemsetD16Async __CUDA_API_PTSZ(cuMemsetD16Async) + #define cuMemsetD32Async __CUDA_API_PTSZ(cuMemsetD32Async) + #define cuMemsetD2D8Async __CUDA_API_PTSZ(cuMemsetD2D8Async) + #define cuMemsetD2D16Async __CUDA_API_PTSZ(cuMemsetD2D16Async) + #define cuMemsetD2D32Async __CUDA_API_PTSZ(cuMemsetD2D32Async) + + #define cuStreamGetPriority __CUDA_API_PTSZ(cuStreamGetPriority) + #define cuStreamGetId __CUDA_API_PTSZ(cuStreamGetId) + #define cuStreamGetFlags __CUDA_API_PTSZ(cuStreamGetFlags) + #define cuStreamGetCtx __CUDA_API_PTSZ(cuStreamGetCtx) + #define cuStreamWaitEvent __CUDA_API_PTSZ(cuStreamWaitEvent) + #define cuStreamEndCapture __CUDA_API_PTSZ(cuStreamEndCapture) + #define cuStreamIsCapturing __CUDA_API_PTSZ(cuStreamIsCapturing) + #define cuStreamUpdateCaptureDependencies __CUDA_API_PTSZ(cuStreamUpdateCaptureDependencies) + #define cuStreamAddCallback __CUDA_API_PTSZ(cuStreamAddCallback) + #define cuStreamAttachMemAsync __CUDA_API_PTSZ(cuStreamAttachMemAsync) + #define cuStreamQuery __CUDA_API_PTSZ(cuStreamQuery) + #define cuStreamSynchronize __CUDA_API_PTSZ(cuStreamSynchronize) + #define cuEventRecord __CUDA_API_PTSZ(cuEventRecord) + #define cuEventRecordWithFlags __CUDA_API_PTSZ(cuEventRecordWithFlags) + #define cuLaunchKernel __CUDA_API_PTSZ(cuLaunchKernel) + #define cuLaunchKernelEx __CUDA_API_PTSZ(cuLaunchKernelEx) + #define cuLaunchHostFunc __CUDA_API_PTSZ(cuLaunchHostFunc) + #define cuGraphicsMapResources __CUDA_API_PTSZ(cuGraphicsMapResources) + #define cuGraphicsUnmapResources __CUDA_API_PTSZ(cuGraphicsUnmapResources) + + #define cuLaunchCooperativeKernel __CUDA_API_PTSZ(cuLaunchCooperativeKernel) + + #define cuSignalExternalSemaphoresAsync __CUDA_API_PTSZ(cuSignalExternalSemaphoresAsync) + #define cuWaitExternalSemaphoresAsync __CUDA_API_PTSZ(cuWaitExternalSemaphoresAsync) + + #define cuGraphInstantiateWithParams __CUDA_API_PTSZ(cuGraphInstantiateWithParams) + #define cuGraphUpload __CUDA_API_PTSZ(cuGraphUpload) + #define cuGraphLaunch __CUDA_API_PTSZ(cuGraphLaunch) + #define cuStreamCopyAttributes __CUDA_API_PTSZ(cuStreamCopyAttributes) + #define cuStreamGetAttribute __CUDA_API_PTSZ(cuStreamGetAttribute) + #define cuStreamSetAttribute __CUDA_API_PTSZ(cuStreamSetAttribute) + 
#define cuMemMapArrayAsync __CUDA_API_PTSZ(cuMemMapArrayAsync) + + #define cuMemFreeAsync __CUDA_API_PTSZ(cuMemFreeAsync) + #define cuMemAllocAsync __CUDA_API_PTSZ(cuMemAllocAsync) + #define cuMemAllocFromPoolAsync __CUDA_API_PTSZ(cuMemAllocFromPoolAsync) +#endif + +/** + * \file cuda.h + * \brief Header file for the CUDA Toolkit application programming interface. + * + * \file cudaGL.h + * \brief Header file for the OpenGL interoperability functions of the + * low-level CUDA driver application programming interface. + * + * \file cudaD3D9.h + * \brief Header file for the Direct3D 9 interoperability functions of the + * low-level CUDA driver application programming interface. + */ + +/** + * \defgroup CUDA_TYPES Data types used by CUDA driver + * @{ + */ + +/** + * CUDA API version number + */ +#define CUDA_VERSION 12010 + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * CUDA device pointer + * CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform. + */ +#if defined(_WIN64) || defined(__LP64__) +typedef unsigned long long CUdeviceptr_v2; +#else +typedef unsigned int CUdeviceptr_v2; +#endif +typedef CUdeviceptr_v2 CUdeviceptr; /**< CUDA device pointer */ + +typedef int CUdevice_v1; /**< CUDA device */ +typedef CUdevice_v1 CUdevice; /**< CUDA device */ +typedef struct CUctx_st *CUcontext; /**< CUDA context */ +typedef struct CUmod_st *CUmodule; /**< CUDA module */ +typedef struct CUfunc_st *CUfunction; /**< CUDA function */ +typedef struct CUlib_st *CUlibrary; /**< CUDA library */ +typedef struct CUkern_st *CUkernel; /**< CUDA kernel */ +typedef struct CUarray_st *CUarray; /**< CUDA array */ +typedef struct CUmipmappedArray_st *CUmipmappedArray; /**< CUDA mipmapped array */ +typedef struct CUtexref_st *CUtexref; /**< CUDA texture reference */ +typedef struct CUsurfref_st *CUsurfref; /**< CUDA surface reference */ +typedef struct CUevent_st *CUevent; /**< CUDA event */ +typedef struct CUstream_st *CUstream; /**< CUDA stream */ +typedef struct CUgraphicsResource_st *CUgraphicsResource; /**< CUDA graphics interop resource */ +typedef unsigned long long CUtexObject_v1; /**< An opaque value that represents a CUDA texture object */ +typedef CUtexObject_v1 CUtexObject; /**< An opaque value that represents a CUDA texture object */ +typedef unsigned long long CUsurfObject_v1; /**< An opaque value that represents a CUDA surface object */ +typedef CUsurfObject_v1 CUsurfObject; /**< An opaque value that represents a CUDA surface object */ +typedef struct CUextMemory_st *CUexternalMemory; /**< CUDA external memory */ +typedef struct CUextSemaphore_st *CUexternalSemaphore; /**< CUDA external semaphore */ +typedef struct CUgraph_st *CUgraph; /**< CUDA graph */ +typedef struct CUgraphNode_st *CUgraphNode; /**< CUDA graph node */ +typedef struct CUgraphExec_st *CUgraphExec; /**< CUDA executable graph */ +typedef struct CUmemPoolHandle_st *CUmemoryPool; /**< CUDA memory pool */ +typedef struct CUuserObject_st *CUuserObject; /**< CUDA user object for graphs */ + +#ifndef CU_UUID_HAS_BEEN_DEFINED +#define CU_UUID_HAS_BEEN_DEFINED +typedef struct CUuuid_st { /**< CUDA definition of UUID */ + char bytes[16]; +} CUuuid; +#endif + +/** + * CUDA IPC handle size + */ +#define CU_IPC_HANDLE_SIZE 64 + +/** + * CUDA IPC event handle + */ +typedef struct CUipcEventHandle_st { + char reserved[CU_IPC_HANDLE_SIZE]; +} CUipcEventHandle_v1; +typedef CUipcEventHandle_v1 CUipcEventHandle; + +/** + * CUDA IPC mem handle + */ +typedef struct CUipcMemHandle_st { + 
char reserved[CU_IPC_HANDLE_SIZE]; +} CUipcMemHandle_v1; +typedef CUipcMemHandle_v1 CUipcMemHandle; + +/** + * CUDA Ipc Mem Flags + */ +typedef enum CUipcMem_flags_enum { + CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 0x1 /**< Automatically enable peer access between remote devices as needed */ +} CUipcMem_flags; + + +/** + * CUDA Mem Attach Flags + */ +typedef enum CUmemAttach_flags_enum { + CU_MEM_ATTACH_GLOBAL = 0x1, /**< Memory can be accessed by any stream on any device */ + CU_MEM_ATTACH_HOST = 0x2, /**< Memory cannot be accessed by any stream on any device */ + CU_MEM_ATTACH_SINGLE = 0x4 /**< Memory can only be accessed by a single stream on the associated device */ +} CUmemAttach_flags; + +/** + * Context creation flags + */ +typedef enum CUctx_flags_enum { + CU_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + CU_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + CU_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + CU_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ + CU_CTX_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling + * \deprecated This flag was deprecated as of CUDA 4.0 + * and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. */ + CU_CTX_SCHED_MASK = 0x07, + CU_CTX_MAP_HOST = 0x08, /**< \deprecated This flag was deprecated as of CUDA 11.0 + * and it no longer has any effect. All contexts + * as of CUDA 3.2 behave as though the flag is enabled. */ + CU_CTX_LMEM_RESIZE_TO_MAX = 0x10, /**< Keep local memory allocation after launch */ + CU_CTX_COREDUMP_ENABLE = 0x20, /**< Trigger coredumps from exceptions in this context */ + CU_CTX_USER_COREDUMP_ENABLE= 0x40, /**< Enable user pipe to trigger coredumps in this context */ + CU_CTX_SYNC_MEMOPS = 0x80, /**< Force synchronous blocking on cudaMemcpy/cudaMemset */ + CU_CTX_FLAGS_MASK = 0xFF +} CUctx_flags; + +/** + * Event sched flags + */ +typedef enum CUevent_sched_flags_enum { + CU_EVENT_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + CU_EVENT_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + CU_EVENT_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + CU_EVENT_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} CUevent_sched_flags; + +/** + * NVCL event scheduling flags + */ +typedef enum cl_event_flags_enum { + NVCL_EVENT_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + NVCL_EVENT_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + NVCL_EVENT_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + NVCL_EVENT_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} cl_event_flags; + +/** + * NVCL context scheduling flags + */ +typedef enum cl_context_flags_enum { + NVCL_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */ + NVCL_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ + NVCL_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ + NVCL_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ +} cl_context_flags; + + +/** + * Stream creation flags + */ +typedef enum CUstream_flags_enum { + CU_STREAM_DEFAULT = 0x0, /**< Default stream flag */ + CU_STREAM_NON_BLOCKING = 0x1 /**< Stream does not synchronize with stream 0 (the NULL stream) */ +} CUstream_flags; + +/** + * Legacy stream handle + * + * Stream handle that can be passed as a CUstream to use an implicit stream + * with legacy synchronization behavior. 
+ * + * See details of the \link_sync_behavior + */ +#define CU_STREAM_LEGACY ((CUstream)0x1) + +/** + * Per-thread stream handle + * + * Stream handle that can be passed as a CUstream to use an implicit stream + * with per-thread synchronization behavior. + * + * See details of the \link_sync_behavior + */ +#define CU_STREAM_PER_THREAD ((CUstream)0x2) + +/** + * Event creation flags + */ +typedef enum CUevent_flags_enum { + CU_EVENT_DEFAULT = 0x0, /**< Default event flag */ + CU_EVENT_BLOCKING_SYNC = 0x1, /**< Event uses blocking synchronization */ + CU_EVENT_DISABLE_TIMING = 0x2, /**< Event will not record timing data */ + CU_EVENT_INTERPROCESS = 0x4 /**< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set */ +} CUevent_flags; + +/** + * Event record flags + */ +typedef enum CUevent_record_flags_enum { + CU_EVENT_RECORD_DEFAULT = 0x0, /**< Default event record flag */ + CU_EVENT_RECORD_EXTERNAL = 0x1 /**< When using stream capture, create an event record node + * instead of the default behavior. This flag is invalid + * when used outside of capture. */ +} CUevent_record_flags; + +/** + * Event wait flags + */ +typedef enum CUevent_wait_flags_enum { + CU_EVENT_WAIT_DEFAULT = 0x0, /**< Default event wait flag */ + CU_EVENT_WAIT_EXTERNAL = 0x1 /**< When using stream capture, create an event wait node + * instead of the default behavior. This flag is invalid + * when used outside of capture.*/ +} CUevent_wait_flags; + +/** + * Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64 + */ +typedef enum CUstreamWaitValue_flags_enum { + CU_STREAM_WAIT_VALUE_GEQ = 0x0, /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit + values). Note this is a cyclic comparison which ignores wraparound. + (Default behavior.) */ + CU_STREAM_WAIT_VALUE_EQ = 0x1, /**< Wait until *addr == value. */ + CU_STREAM_WAIT_VALUE_AND = 0x2, /**< Wait until (*addr & value) != 0. */ + CU_STREAM_WAIT_VALUE_NOR = 0x3, /**< Wait until ~(*addr | value) != 0. Support for this operation can be + queried with ::cuDeviceGetAttribute() and + ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/ + CU_STREAM_WAIT_VALUE_FLUSH = 1<<30 /**< Follow the wait operation with a flush of outstanding remote writes. This + means that, if a remote write operation is guaranteed to have reached the + device before the wait can be satisfied, that write is guaranteed to be + visible to downstream device work. The device is permitted to reorder + remote writes internally. For example, this flag would be required if + two remote writes arrive in a defined order, the wait is satisfied by the + second write, and downstream work needs to observe the first write. + Support for this operation is restricted to selected platforms and can be + queried with ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES.*/ +} CUstreamWaitValue_flags; + +/** + * Flags for ::cuStreamWriteValue32 + */ +typedef enum CUstreamWriteValue_flags_enum { + CU_STREAM_WRITE_VALUE_DEFAULT = 0x0, /**< Default behavior */ + CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 0x1 /**< Permits the write to be reordered with writes which were issued + before it, as a performance optimization. Normally, + ::cuStreamWriteValue32 will provide a memory fence before the + write, which has similar semantics to + __threadfence_system() but is scoped to the stream + rather than a CUDA thread. + This flag is not supported in the v2 API. 
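A hedged sketch of how the wait/write flags above pair with the corresponding driver calls, cuStreamWaitValue32 and cuStreamWriteValue32; the helper name order_with_flag is an assumption, error handling is omitted, and flag must refer to memory that is valid for stream memory operations.

#include <cuda.h>

/* Producer publishes a flag; consumer blocks until it observes it. */
void order_with_flag(CUstream producer, CUstream consumer, CUdeviceptr flag)
{
    /* ...work enqueued on 'producer'... then publish the flag */
    cuStreamWriteValue32(producer, flag, 1, CU_STREAM_WRITE_VALUE_DEFAULT);

    /* 'consumer' proceeds only once (int32_t)(*flag - 1) >= 0 */
    cuStreamWaitValue32(consumer, flag, 1, CU_STREAM_WAIT_VALUE_GEQ);
    /* ...dependent work enqueued on 'consumer'... */
}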
*/ +} CUstreamWriteValue_flags; + +/** + * Operations for ::cuStreamBatchMemOp + */ +typedef enum CUstreamBatchMemOpType_enum { + CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1, /**< Represents a ::cuStreamWaitValue32 operation */ + CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2, /**< Represents a ::cuStreamWriteValue32 operation */ + CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4, /**< Represents a ::cuStreamWaitValue64 operation */ + CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5, /**< Represents a ::cuStreamWriteValue64 operation */ + CU_STREAM_MEM_OP_BARRIER = 6, /**< Insert a memory barrier of the specified type */ + CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3 /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a + standalone operation. */ +} CUstreamBatchMemOpType; + +/** + * Flags for ::cuStreamMemoryBarrier + */ +typedef enum CUstreamMemoryBarrier_flags_enum { + CU_STREAM_MEMORY_BARRIER_TYPE_SYS = 0x0, /**< System-wide memory barrier. */ + CU_STREAM_MEMORY_BARRIER_TYPE_GPU = 0x1 /**< Limit memory barrier scope to the GPU. */ +} CUstreamMemoryBarrier_flags; + +/** + * Per-operation parameters for ::cuStreamBatchMemOp + */ +typedef union CUstreamBatchMemOpParams_union { + CUstreamBatchMemOpType operation; + struct CUstreamMemOpWaitValueParams_st { + CUstreamBatchMemOpType operation; + CUdeviceptr address; + union { + cuuint32_t value; + cuuint64_t value64; + }; + unsigned int flags; + CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */ + } waitValue; + struct CUstreamMemOpWriteValueParams_st { + CUstreamBatchMemOpType operation; + CUdeviceptr address; + union { + cuuint32_t value; + cuuint64_t value64; + }; + unsigned int flags; + CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */ + } writeValue; + struct CUstreamMemOpFlushRemoteWritesParams_st { + CUstreamBatchMemOpType operation; + unsigned int flags; + } flushRemoteWrites; + struct CUstreamMemOpMemoryBarrierParams_st { /**< Only supported in the _v2 API */ + CUstreamBatchMemOpType operation; + unsigned int flags; + } memoryBarrier; + cuuint64_t pad[6]; +} CUstreamBatchMemOpParams_v1; +typedef CUstreamBatchMemOpParams_v1 CUstreamBatchMemOpParams; + +typedef struct CUDA_BATCH_MEM_OP_NODE_PARAMS_st { + CUcontext ctx; + unsigned int count; + CUstreamBatchMemOpParams *paramArray; + unsigned int flags; +} CUDA_BATCH_MEM_OP_NODE_PARAMS; + +/** + * Occupancy calculator flag + */ +typedef enum CUoccupancy_flags_enum { + CU_OCCUPANCY_DEFAULT = 0x0, /**< Default behavior */ + CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 0x1 /**< Assume global caching is enabled and cannot be automatically turned off */ +} CUoccupancy_flags; + +/** + * Flags for ::cuStreamUpdateCaptureDependencies + */ +typedef enum CUstreamUpdateCaptureDependencies_flags_enum { + CU_STREAM_ADD_CAPTURE_DEPENDENCIES = 0x0, /**< Add new nodes to the dependency set */ + CU_STREAM_SET_CAPTURE_DEPENDENCIES = 0x1 /**< Replace the dependency set with the new nodes */ +} CUstreamUpdateCaptureDependencies_flags; + +/** + * Array formats + */ +typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, /**< Unsigned 8-bit integers */ + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, /**< Unsigned 16-bit integers */ + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, /**< Unsigned 32-bit integers */ + CU_AD_FORMAT_SIGNED_INT8 = 0x08, /**< Signed 8-bit integers */ + CU_AD_FORMAT_SIGNED_INT16 = 0x09, /**< Signed 16-bit integers */ + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, /**< Signed 32-bit integers */ + CU_AD_FORMAT_HALF = 0x10, /**< 16-bit floating point */ + CU_AD_FORMAT_FLOAT = 
0x20, /**< 32-bit floating point */ + CU_AD_FORMAT_NV12 = 0xb0, /**< 8-bit YUV planar format, with 4:2:0 sampling */ + CU_AD_FORMAT_UNORM_INT8X1 = 0xc0, /**< 1 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT8X2 = 0xc1, /**< 2 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT8X4 = 0xc2, /**< 4 channel unsigned 8-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X1 = 0xc3, /**< 1 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X2 = 0xc4, /**< 2 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_UNORM_INT16X4 = 0xc5, /**< 4 channel unsigned 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X1 = 0xc6, /**< 1 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X2 = 0xc7, /**< 2 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT8X4 = 0xc8, /**< 4 channel signed 8-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X1 = 0xc9, /**< 1 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X2 = 0xca, /**< 2 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_SNORM_INT16X4 = 0xcb, /**< 4 channel signed 16-bit normalized integer */ + CU_AD_FORMAT_BC1_UNORM = 0x91, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format */ + CU_AD_FORMAT_BC1_UNORM_SRGB = 0x92, /**< 4 channel unsigned normalized block-compressed (BC1 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC2_UNORM = 0x93, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format */ + CU_AD_FORMAT_BC2_UNORM_SRGB = 0x94, /**< 4 channel unsigned normalized block-compressed (BC2 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC3_UNORM = 0x95, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format */ + CU_AD_FORMAT_BC3_UNORM_SRGB = 0x96, /**< 4 channel unsigned normalized block-compressed (BC3 compression) format with sRGB encoding*/ + CU_AD_FORMAT_BC4_UNORM = 0x97, /**< 1 channel unsigned normalized block-compressed (BC4 compression) format */ + CU_AD_FORMAT_BC4_SNORM = 0x98, /**< 1 channel signed normalized block-compressed (BC4 compression) format */ + CU_AD_FORMAT_BC5_UNORM = 0x99, /**< 2 channel unsigned normalized block-compressed (BC5 compression) format */ + CU_AD_FORMAT_BC5_SNORM = 0x9a, /**< 2 channel signed normalized block-compressed (BC5 compression) format */ + CU_AD_FORMAT_BC6H_UF16 = 0x9b, /**< 3 channel unsigned half-float block-compressed (BC6H compression) format */ + CU_AD_FORMAT_BC6H_SF16 = 0x9c, /**< 3 channel signed half-float block-compressed (BC6H compression) format */ + CU_AD_FORMAT_BC7_UNORM = 0x9d, /**< 4 channel unsigned normalized block-compressed (BC7 compression) format */ + CU_AD_FORMAT_BC7_UNORM_SRGB = 0x9e /**< 4 channel unsigned normalized block-compressed (BC7 compression) format with sRGB encoding */ +} CUarray_format; + +/** + * Texture reference addressing modes + */ +typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, /**< Wrapping address mode */ + CU_TR_ADDRESS_MODE_CLAMP = 1, /**< Clamp to edge address mode */ + CU_TR_ADDRESS_MODE_MIRROR = 2, /**< Mirror address mode */ + CU_TR_ADDRESS_MODE_BORDER = 3 /**< Border address mode */ +} CUaddress_mode; + +/** + * Texture reference filtering modes + */ +typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, /**< Point filter mode */ + CU_TR_FILTER_MODE_LINEAR = 1 /**< Linear filter mode */ +} CUfilter_mode; + +/** + * Device properties + */ +typedef enum CUdevice_attribute_enum { + CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 
1, /**< Maximum number of threads per block */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2, /**< Maximum block dimension X */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3, /**< Maximum block dimension Y */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4, /**< Maximum block dimension Z */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5, /**< Maximum grid dimension X */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6, /**< Maximum grid dimension Y */ + CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7, /**< Maximum grid dimension Z */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8, /**< Maximum shared memory available per block in bytes */ + CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK */ + CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9, /**< Memory available on device for __constant__ variables in a CUDA C kernel in bytes */ + CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10, /**< Warp size in threads */ + CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11, /**< Maximum pitch in bytes allowed by memory copies */ + CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12, /**< Maximum number of 32-bit registers available per block */ + CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK */ + CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13, /**< Typical clock frequency in kilohertz */ + CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14, /**< Alignment requirement for textures */ + CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15, /**< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. */ + CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16, /**< Number of multiprocessors on device */ + CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17, /**< Specifies whether there is a run time limit on kernels */ + CU_DEVICE_ATTRIBUTE_INTEGRATED = 18, /**< Device is integrated with host memory */ + CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19, /**< Device can map host memory into CUDA address space */ + CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20, /**< Compute mode (See ::CUcomputemode for details) */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21, /**< Maximum 1D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22, /**< Maximum 2D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23, /**< Maximum 2D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24, /**< Maximum 3D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25, /**< Maximum 3D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26, /**< Maximum 3D texture depth */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27, /**< Maximum 2D layered texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28, /**< Maximum 2D layered texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29, /**< Maximum layers in a 2D layered texture */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS */ + CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30, /**< Alignment requirement for surfaces */ + CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31, /**< Device can possibly execute multiple kernels concurrently */ + 
CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32, /**< Device has ECC support enabled */ + CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33, /**< PCI bus ID of the device */ + CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34, /**< PCI device ID of the device */ + CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35, /**< Device is using TCC driver model */ + CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36, /**< Peak memory clock frequency in kilohertz */ + CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37, /**< Global memory bus width in bits */ + CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38, /**< Size of L2 cache in bytes */ + CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39, /**< Maximum resident threads per multiprocessor */ + CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40, /**< Number of asynchronous engines */ + CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41, /**< Device shares a unified address space with the host */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42, /**< Maximum 1D layered texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43, /**< Maximum layers in a 1D layered texture */ + CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44, /**< Deprecated, do not use. */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45, /**< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46, /**< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47, /**< Alternate maximum 3D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48, /**< Alternate maximum 3D texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49, /**< Alternate maximum 3D texture depth */ + CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50, /**< PCI domain ID of the device */ + CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51, /**< Pitch alignment requirement for textures */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52, /**< Maximum cubemap texture width/height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53, /**< Maximum cubemap layered texture width/height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54, /**< Maximum layers in a cubemap layered texture */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55, /**< Maximum 1D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56, /**< Maximum 2D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57, /**< Maximum 2D surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58, /**< Maximum 3D surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59, /**< Maximum 3D surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60, /**< Maximum 3D surface depth */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61, /**< Maximum 1D layered surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62, /**< Maximum layers in a 1D layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63, /**< Maximum 2D layered surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64, /**< Maximum 2D layered surface height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65, /**< Maximum layers in a 2D layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66, /**< Maximum cubemap surface width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67, /**< Maximum cubemap layered surface width */ + 
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68, /**< Maximum layers in a cubemap layered surface */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69, /**< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70, /**< Maximum 2D linear texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71, /**< Maximum 2D linear texture height */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72, /**< Maximum 2D linear texture pitch in bytes */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73, /**< Maximum mipmapped 2D texture width */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74, /**< Maximum mipmapped 2D texture height */ + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75, /**< Major compute capability version number */ + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76, /**< Minor compute capability version number */ + CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77, /**< Maximum mipmapped 1D texture width */ + CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78, /**< Device supports stream priorities */ + CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79, /**< Device supports caching globals in L1 */ + CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80, /**< Device supports caching locals in L1 */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81, /**< Maximum shared memory available per multiprocessor in bytes */ + CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82, /**< Maximum number of 32-bit registers available per multiprocessor */ + CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83, /**< Device can allocate managed memory on this system */ + CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84, /**< Device is on a multi-GPU board */ + CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85, /**< Unique id for a group of devices on the same multi-GPU board */ + CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86, /**< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)*/ + CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87, /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */ + CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88, /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */ + CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89, /**< Device can coherently access managed memory concurrently with the CPU */ + CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90, /**< Device supports compute preemption. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91, /**< Device can access host registered memory at the same virtual address as the CPU */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1 = 92, /**< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1 = 93, /**< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1 = 94, /**< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported. 
*/ + CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95, /**< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel */ + CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96, /**< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated. */ + CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97, /**< Maximum optin shared memory per block */ + CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98, /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details. */ + CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99, /**< Device supports host memory registration via ::cudaHostRegister. */ + CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100, /**< Device accesses pageable memory via the host's page tables. */ + CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101, /**< The host can directly access managed memory on the device without migration. */ + CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102, /**< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED*/ + CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = 102, /**< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103, /**< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104, /**< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105, /**< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106, /**< Maximum number of blocks per multiprocessor */ + CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107, /**< Device supports compression of memory */ + CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108, /**< Maximum L2 persisting lines capacity setting in bytes. */ + CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109, /**< Maximum value of CUaccessPolicyWindow::num_bytes. 
*/ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110, /**< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate */ + CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111, /**< Shared memory reserved by CUDA driver per block in bytes */ + CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = 112, /**< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays */ + CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = 113, /**< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU */ + CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = 114, /**< External timeline semaphore interop is supported on the device */ + CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = 115, /**< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = 116, /**< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = 117, /**< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum */ + CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = 118, /**< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. */ + CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = 119, /**< Handle types supported with mempool based IPC */ + CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH = 120, /**< Indicates device supports cluster launch */ + CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED = 121, /**< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays */ + CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 122, /**< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs. */ + CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 123, /**< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs. */ + CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED = 124, /**< Device supports buffer sharing with dma_buf mechanism. */ + CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED = 125, /**< Device supports IPC Events. */ + CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT = 126, /**< Number of memory domains the device supports. */ + CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED = 127, /**< Device supports accessing memory using Tensor Map. */ + CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS = 129, /**< Device supports unified function pointers. */ + CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED = 132, /**< Device supports switch multicast and reduction operations. 
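A hedged sketch of querying a few of the device attributes enumerated above through cuDeviceGetAttribute; the helper name print_device_limits is an assumption and return codes are not checked.

#include <cuda.h>
#include <stdio.h>

void print_device_limits(CUdevice dev)
{
    int sms, threads_per_sm, cc_major, cc_minor;
    cuDeviceGetAttribute(&sms,            CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,           dev);
    cuDeviceGetAttribute(&threads_per_sm, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR, dev);
    cuDeviceGetAttribute(&cc_major,       CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR,       dev);
    cuDeviceGetAttribute(&cc_minor,       CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR,       dev);
    printf("sm_%d%d: %d SMs, %d resident threads per SM\n",
           cc_major, cc_minor, sms, threads_per_sm);
}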
*/ + CU_DEVICE_ATTRIBUTE_MAX +} CUdevice_attribute; + +/** + * Legacy device properties + */ +typedef struct CUdevprop_st { + int maxThreadsPerBlock; /**< Maximum number of threads per block */ + int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */ + int maxGridSize[3]; /**< Maximum size of each dimension of a grid */ + int sharedMemPerBlock; /**< Shared memory available per block in bytes */ + int totalConstantMemory; /**< Constant memory available on device in bytes */ + int SIMDWidth; /**< Warp size in threads */ + int memPitch; /**< Maximum pitch in bytes allowed by memory copies */ + int regsPerBlock; /**< 32-bit registers available per block */ + int clockRate; /**< Clock frequency in kilohertz */ + int textureAlign; /**< Alignment requirement for textures */ +} CUdevprop_v1; +typedef CUdevprop_v1 CUdevprop; + +/** + * Pointer information + */ +typedef enum CUpointer_attribute_enum { + CU_POINTER_ATTRIBUTE_CONTEXT = 1, /**< The ::CUcontext on which a pointer was allocated or registered */ + CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2, /**< The ::CUmemorytype describing the physical location of a pointer */ + CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3, /**< The address at which a pointer's memory may be accessed on the device */ + CU_POINTER_ATTRIBUTE_HOST_POINTER = 4, /**< The address at which a pointer's memory may be accessed on the host */ + CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5, /**< A pair of tokens for use with the nv-p2p.h Linux kernel interface */ + CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6, /**< Synchronize every synchronous memory operation initiated on this region */ + CU_POINTER_ATTRIBUTE_BUFFER_ID = 7, /**< A process-wide unique ID for an allocated memory region*/ + CU_POINTER_ATTRIBUTE_IS_MANAGED = 8, /**< Indicates if the pointer points to managed memory */ + CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9, /**< A device ordinal of a device on which a pointer was allocated or registered */ + CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10, /**< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise **/ + CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11, /**< Starting address for this requested pointer */ + CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12, /**< Size of the address range for this requested pointer */ + CU_POINTER_ATTRIBUTE_MAPPED = 13, /**< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise **/ + CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14, /**< Bitmask of allowed ::CUmemAllocationHandleType for this allocation **/ + CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15, /**< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API **/ + CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = 16, /**< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given */ + CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = 17 /**< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. 
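Similarly, a minimal sketch of reading two of the pointer attributes listed above with cuPointerGetAttribute; describe_pointer is an assumed name and return codes are ignored for brevity.

#include <cuda.h>
#include <stdio.h>

void describe_pointer(CUdeviceptr p)
{
    unsigned int type;   /* receives a CUmemorytype value */
    int device;
    cuPointerGetAttribute(&type,   CU_POINTER_ATTRIBUTE_MEMORY_TYPE,    p);
    cuPointerGetAttribute(&device, CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL, p);
    printf("memory type %u, allocated/registered on device %d\n", type, device);
}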
**/ + , + CU_POINTER_ATTRIBUTE_MAPPING_SIZE = 18, /**< Size of the actual underlying mapping that the pointer belongs to **/ + CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR = 19, /**< The start address of the mapping that the pointer belongs to **/ + CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID = 20 /**< A process-wide unique id corresponding to the physical allocation the pointer belongs to **/ +} CUpointer_attribute; + +/** + * Function properties + */ +typedef enum CUfunction_attribute_enum { + /** + * The maximum number of threads per block, beyond which a launch of the + * function would fail. This number depends on both the function and the + * device on which the function is currently loaded. + */ + CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0, + + /** + * The size in bytes of statically-allocated shared memory required by + * this function. This does not include dynamically-allocated shared + * memory requested by the user at runtime. + */ + CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1, + + /** + * The size in bytes of user-allocated constant memory required by this + * function. + */ + CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2, + + /** + * The size in bytes of local memory used by each thread of this function. + */ + CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3, + + /** + * The number of registers used by each thread of this function. + */ + CU_FUNC_ATTRIBUTE_NUM_REGS = 4, + + /** + * The PTX virtual architecture version for which the function was + * compiled. This value is the major PTX version * 10 + the minor PTX + * version, so a PTX version 1.3 function would return the value 13. + * Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + */ + CU_FUNC_ATTRIBUTE_PTX_VERSION = 5, + + /** + * The binary architecture version for which the function was compiled. + * This value is the major binary version * 10 + the minor binary version, + * so a binary version 1.3 function would return the value 13. Note that + * this will return a value of 10 for legacy cubins that do not have a + * properly-encoded binary architecture version. + */ + CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6, + + /** + * The attribute to indicate whether the function has been compiled with + * user specified option "-Xptxas --dlcm=ca" set . + */ + CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7, + + /** + * The maximum size in bytes of dynamically-allocated shared memory that can be used by + * this function. If the user-specified dynamic shared memory size is larger than this + * value, the launch will fail. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8, + + /** + * On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. + * Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9, + + /** + * If this attribute is set, the kernel must launch with a valid cluster + * size specified. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET = 10, + + /** + * The required cluster width in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. 
+ * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH = 11, + + /** + * The required cluster height in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. + * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT = 12, + + /** + * The required cluster depth in blocks. The values must either all be 0 or + * all be positive. The validity of the cluster dimensions is otherwise + * checked at launch time. + * + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime should return CUDA_ERROR_NOT_PERMITTED. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH = 13, + + /** + * Whether the function can be launched with non-portable cluster size. 1 is + * allowed, 0 is disallowed. A non-portable cluster size may only function + * on the specific SKUs the program is tested on. The launch might fail if + * the program is run on a different hardware platform. + * + * CUDA API provides cudaOccupancyMaxActiveClusters to assist with checking + * whether the desired size can be launched on the current device. + * + * Portable Cluster Size + * + * A portable cluster size is guaranteed to be functional on all compute + * capabilities higher than the target compute capability. The portable + * cluster size for sm_90 is 8 blocks per cluster. This value may increase + * for future compute capabilities. + * + * The specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED = 14, + + /** + * The block scheduling policy of a function. The value type is + * CUclusterSchedulingPolicy / cudaClusterSchedulingPolicy. + * See ::cuFuncSetAttribute, ::cuKernelSetAttribute + */ + CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 15, + + CU_FUNC_ATTRIBUTE_MAX +} CUfunction_attribute; + +/** + * Function cache configurations + */ +typedef enum CUfunc_cache_enum { + CU_FUNC_CACHE_PREFER_NONE = 0x00, /**< no preference for shared memory or L1 (default) */ + CU_FUNC_CACHE_PREFER_SHARED = 0x01, /**< prefer larger shared memory and smaller L1 cache */ + CU_FUNC_CACHE_PREFER_L1 = 0x02, /**< prefer larger L1 cache and smaller shared memory */ + CU_FUNC_CACHE_PREFER_EQUAL = 0x03 /**< prefer equal sized L1 cache and shared memory */ +} CUfunc_cache; + +/** + * Shared memory configurations + */ +typedef enum CUsharedconfig_enum { + CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0x00, /**< set default shared memory bank size */ + CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 0x01, /**< set shared memory bank width to four bytes */ + CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 0x02 /**< set shared memory bank width to eight bytes */ +} CUsharedconfig; + +/** + * Shared memory carveout configurations. 
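+ * (A hedged sketch of the intended use, assuming `f` is a valid ::CUfunction:
+ * \code
+ *   cuFuncSetAttribute(f, CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT,
+ *                      CU_SHAREDMEM_CARVEOUT_MAX_SHARED);
+ * \endcode
+ * The carveout is only a hint; the driver may choose a different split.)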
These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute + */ +typedef enum CUshared_carveout_enum { + CU_SHAREDMEM_CARVEOUT_DEFAULT = -1, /**< No preference for shared memory or L1 (default) */ + CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100, /**< Prefer maximum available shared memory, minimum L1 cache */ + CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0 /**< Prefer maximum available L1 cache, minimum shared memory */ +} CUshared_carveout; + +/** + * Memory types + */ +typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, /**< Host memory */ + CU_MEMORYTYPE_DEVICE = 0x02, /**< Device memory */ + CU_MEMORYTYPE_ARRAY = 0x03, /**< Array memory */ + CU_MEMORYTYPE_UNIFIED = 0x04 /**< Unified device or host memory */ +} CUmemorytype; + +/** + * Compute Modes + */ +typedef enum CUcomputemode_enum { + CU_COMPUTEMODE_DEFAULT = 0, /**< Default compute mode (Multiple contexts allowed per device) */ + CU_COMPUTEMODE_PROHIBITED = 2, /**< Compute-prohibited mode (No contexts can be created on this device at this time) */ + CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3 /**< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) */ +} CUcomputemode; + +/** + * Memory advise values + */ +typedef enum CUmem_advise_enum { + CU_MEM_ADVISE_SET_READ_MOSTLY = 1, /**< Data will mostly be read and only occasionally be written to */ + CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2, /**< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY */ + CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3, /**< Set the preferred location for the data as the specified device */ + CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4, /**< Clear the preferred location for the data */ + CU_MEM_ADVISE_SET_ACCESSED_BY = 5, /**< Data will be accessed by the specified device, so prevent page faults as much as possible */ + CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6 /**< Let the Unified Memory subsystem decide on the page faulting policy for the specified device */ +} CUmem_advise; + +typedef enum CUmem_range_attribute_enum { + CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1, /**< Whether the range will mostly be read and only occasionally be written to */ + CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2, /**< The preferred location of the range */ + CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3, /**< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device */ + CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4 /**< The last location to which the range was prefetched */ +} CUmem_range_attribute; + +/** + * Online compiler and linker options + */ +typedef enum CUjit_option_enum +{ + /** + * Max number of registers that a thread may use.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_MAX_REGISTERS = 0, + + /** + * IN: Specifies minimum number of threads per block to target compilation + * for\n + * OUT: Returns the number of threads the compiler actually targeted. + * This restricts the resource utilization of the compiler (e.g. max + * registers) such that a block with the given number of threads should be + * able to launch based on register limitations. 
Note, this option does not + * currently take into account any other resource limitations, such as + * shared memory utilization.\n + * Cannot be combined with ::CU_JIT_TARGET.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_THREADS_PER_BLOCK = 1, + + /** + * Overwrites the option value with the total wall clock time, in + * milliseconds, spent in the compiler and linker\n + * Option type: float\n + * Applies to: compiler and linker + */ + CU_JIT_WALL_TIME = 2, + + /** + * Pointer to a buffer in which to print any log messages + * that are informational in nature (the buffer size is specified via + * option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n + * Option type: char *\n + * Applies to: compiler and linker + */ + CU_JIT_INFO_LOG_BUFFER = 3, + + /** + * IN: Log buffer size in bytes. Log messages will be capped at this size + * (including null terminator)\n + * OUT: Amount of log buffer filled with messages\n + * Option type: unsigned int\n + * Applies to: compiler and linker + */ + CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES = 4, + + /** + * Pointer to a buffer in which to print any log messages that + * reflect errors (the buffer size is specified via option + * ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n + * Option type: char *\n + * Applies to: compiler and linker + */ + CU_JIT_ERROR_LOG_BUFFER = 5, + + /** + * IN: Log buffer size in bytes. Log messages will be capped at this size + * (including null terminator)\n + * OUT: Amount of log buffer filled with messages\n + * Option type: unsigned int\n + * Applies to: compiler and linker + */ + CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES = 6, + + /** + * Level of optimizations to apply to generated code (0 - 4), with 4 + * being the default and highest level of optimizations.\n + * Option type: unsigned int\n + * Applies to: compiler only + */ + CU_JIT_OPTIMIZATION_LEVEL = 7, + + /** + * No option value required. Determines the target based on the current + * attached context (default)\n + * Option type: No option value needed\n + * Applies to: compiler and linker + */ + CU_JIT_TARGET_FROM_CUCONTEXT = 8, + + /** + * Target is chosen based on supplied ::CUjit_target. Cannot be + * combined with ::CU_JIT_THREADS_PER_BLOCK.\n + * Option type: unsigned int for enumerated type ::CUjit_target\n + * Applies to: compiler and linker + */ + CU_JIT_TARGET = 9, + + /** + * Specifies choice of fallback strategy if matching cubin is not found. + * Choice is based on supplied ::CUjit_fallback. 
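+ * (A hedged sketch of passing JIT options, assuming `ptx` points to NUL-terminated PTX
+ * source; option values are supplied through the parallel `void *` array:
+ * \code
+ *   char          log[8192];
+ *   CUjit_option  opt[] = { CU_JIT_ERROR_LOG_BUFFER,
+ *                           CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
+ *                           CU_JIT_FALLBACK_STRATEGY };
+ *   void         *val[] = { log,
+ *                           (void *)(size_t)sizeof(log),
+ *                           (void *)(size_t)CU_PREFER_PTX };
+ *   CUmodule mod;
+ *   cuModuleLoadDataEx(&mod, ptx, 3, opt, val);
+ * \endcode
+ * )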
This option cannot be + * used with cuLink* APIs as the linker requires exact matches.\n + * Option type: unsigned int for enumerated type ::CUjit_fallback\n + * Applies to: compiler only + */ + CU_JIT_FALLBACK_STRATEGY = 10, + + /** + * Specifies whether to create debug information in output (-g) + * (0: false, default)\n + * Option type: int\n + * Applies to: compiler and linker + */ + CU_JIT_GENERATE_DEBUG_INFO = 11, + + /** + * Generate verbose log messages (0: false, default)\n + * Option type: int\n + * Applies to: compiler and linker + */ + CU_JIT_LOG_VERBOSE = 12, + + /** + * Generate line number information (-lineinfo) (0: false, default)\n + * Option type: int\n + * Applies to: compiler only + */ + CU_JIT_GENERATE_LINE_INFO = 13, + + /** + * Specifies whether to enable caching explicitly (-dlcm) \n + * Choice is based on supplied ::CUjit_cacheMode_enum.\n + * Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n + * Applies to: compiler only + */ + CU_JIT_CACHE_MODE = 14, + + /** + * \deprecated + * This jit option is deprecated and should not be used. + */ + CU_JIT_NEW_SM3X_OPT = 15, + + /** + * This jit option is used for internal purpose only. + */ + CU_JIT_FAST_COMPILE = 16, + + /** + * Array of device symbol names that will be relocated to the corresponding + * host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n + * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + * When loading a device module, driver will relocate all encountered + * unresolved symbols to the host addresses.\n + * It is only allowed to register symbols that correspond to unresolved + * global variables.\n + * It is illegal to register the same device symbol at multiple addresses.\n + * Option type: const char **\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_NAMES = 17, + + /** + * Array of host addresses that will be used to relocate corresponding + * device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n + * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n + * Option type: void **\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_ADDRESSES = 18, + + /** + * Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and + * ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n + * Option type: unsigned int\n + * Applies to: dynamic linker only + */ + CU_JIT_GLOBAL_SYMBOL_COUNT = 19, + + /** + * \deprecated + * Enable link-time optimization (-dlto) for device code (Disabled by default).\n + * This option is not supported on 32-bit platforms.\n + * Option type: int\n + * Applies to: compiler and linker + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_LTO = 20, + + /** + * \deprecated + * Control single-precision denormals (-ftz) support (0: false, default). + * 1 : flushes denormal values to zero + * 0 : preserves denormal values + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_FTZ = 21, + + /** + * \deprecated + * Control single-precision floating-point division and reciprocals + * (-prec-div) support (1: true, default). 
+ * 1 : Enables the IEEE round-to-nearest mode + * 0 : Enables the fast approximation mode + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_PREC_DIV = 22, + + /** + * \deprecated + * Control single-precision floating-point square root + * (-prec-sqrt) support (1: true, default). + * 1 : Enables the IEEE round-to-nearest mode + * 0 : Enables the fast approximation mode + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_PREC_SQRT = 23, + + /** + * \deprecated + * Enable/Disable the contraction of floating-point multiplies + * and adds/subtracts into floating-point multiply-add (-fma) + * operations (1: Enable, default; 0: Disable). + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_FMA = 24, + + /** + * \deprecated + * Array of kernel names that should be preserved at link time while others + * can be removed.\n + * Must contain ::CU_JIT_REFERENCED_KERNEL_COUNT entries.\n + * Note that kernel names can be mangled by the compiler in which case the + * mangled name needs to be specified.\n + * Wildcard "*" can be used to represent zero or more characters instead of + * specifying the full or mangled name.\n + * It is important to note that the wildcard "*" is also added implicitly. + * For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + * thus preserve all kernels with those names. This can be avoided by providing + * a more specific name like "barfoobaz".\n + * Option type: const char **\n + * Applies to: dynamic linker only + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_KERNEL_NAMES = 25, + + /** + * \deprecated + * Number of entries in ::CU_JIT_REFERENCED_KERNEL_NAMES array.\n + * Option type: unsigned int\n + * Applies to: dynamic linker only + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_KERNEL_COUNT = 26, + + /** + * \deprecated + * Array of variable names (__device__ and/or __constant__) that should be + * preserved at link time while others can be removed.\n + * Must contain ::CU_JIT_REFERENCED_VARIABLE_COUNT entries.\n + * Note that variable names can be mangled by the compiler in which case the + * mangled name needs to be specified.\n + * Wildcard "*" can be used to represent zero or more characters instead of + * specifying the full or mangled name.\n + * It is important to note that the wildcard "*" is also added implicitly. + * For example, specifying "foo" will match "foobaz", "barfoo", "barfoobaz" and + * thus preserve all variables with those names. 
This can be avoided by providing + * a more specific name like "barfoobaz".\n + * Option type: const char **\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_VARIABLE_NAMES = 27, + + /** + * \deprecated + * Number of entries in ::CU_JIT_REFERENCED_VARIABLE_NAMES array.\n + * Option type: unsigned int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_REFERENCED_VARIABLE_COUNT = 28, + + /** + * \deprecated + * This option serves as a hint to enable the JIT compiler/linker + * to remove constant (__constant__) and device (__device__) variables + * unreferenced in device code (Disabled by default).\n + * Note that host references to constant and device variables using APIs like + * ::cuModuleGetGlobal() with this option specified may result in undefined behavior unless + * the variables are explicitly specified using ::CU_JIT_REFERENCED_VARIABLE_NAMES.\n + * Option type: int\n + * Applies to: link-time optimization specified with CU_JIT_LTO + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_OPTIMIZE_UNUSED_DEVICE_VARIABLES = 29, + + /** + * Generate position independent code (0: false)\n + * Option type: int\n + * Applies to: compiler only + */ + CU_JIT_POSITION_INDEPENDENT_CODE = 30, + + CU_JIT_NUM_OPTIONS + +} CUjit_option; + +/* + * Indicates that compute device class supports accelerated features. + */ +#define CU_COMPUTE_ACCELERATED_TARGET_BASE 0x10000 + +/** + * Online compilation targets + */ +typedef enum CUjit_target_enum +{ + CU_TARGET_COMPUTE_30 = 30, /**< Compute device class 3.0 */ + CU_TARGET_COMPUTE_32 = 32, /**< Compute device class 3.2 */ + CU_TARGET_COMPUTE_35 = 35, /**< Compute device class 3.5 */ + CU_TARGET_COMPUTE_37 = 37, /**< Compute device class 3.7 */ + CU_TARGET_COMPUTE_50 = 50, /**< Compute device class 5.0 */ + CU_TARGET_COMPUTE_52 = 52, /**< Compute device class 5.2 */ + CU_TARGET_COMPUTE_53 = 53, /**< Compute device class 5.3 */ + CU_TARGET_COMPUTE_60 = 60, /**< Compute device class 6.0.*/ + CU_TARGET_COMPUTE_61 = 61, /**< Compute device class 6.1.*/ + CU_TARGET_COMPUTE_62 = 62, /**< Compute device class 6.2.*/ + CU_TARGET_COMPUTE_70 = 70, /**< Compute device class 7.0.*/ + CU_TARGET_COMPUTE_72 = 72, /**< Compute device class 7.2.*/ + CU_TARGET_COMPUTE_75 = 75, /**< Compute device class 7.5.*/ + CU_TARGET_COMPUTE_80 = 80, /**< Compute device class 8.0.*/ + CU_TARGET_COMPUTE_86 = 86, /**< Compute device class 8.6.*/ + CU_TARGET_COMPUTE_87 = 87, /**< Compute device class 8.7.*/ + CU_TARGET_COMPUTE_89 = 89, /**< Compute device class 8.9.*/ + CU_TARGET_COMPUTE_90 = 90, /**< Compute device class 9.0.*/ + + /**< Compute device class 9.0. 
with accelerated features.*/ + CU_TARGET_COMPUTE_90A = CU_COMPUTE_ACCELERATED_TARGET_BASE + CU_TARGET_COMPUTE_90 +} CUjit_target; + +/** + * Cubin matching fallback strategies + */ +typedef enum CUjit_fallback_enum +{ + CU_PREFER_PTX = 0, /**< Prefer to compile ptx if exact binary match not found */ + + CU_PREFER_BINARY /**< Prefer to fall back to compatible binary code if exact match not found */ + +} CUjit_fallback; + +/** + * Caching modes for dlcm + */ +typedef enum CUjit_cacheMode_enum +{ + CU_JIT_CACHE_OPTION_NONE = 0, /**< Compile with no -dlcm flag specified */ + CU_JIT_CACHE_OPTION_CG, /**< Compile with L1 cache disabled */ + CU_JIT_CACHE_OPTION_CA /**< Compile with L1 cache enabled */ +} CUjit_cacheMode; + +/** + * Device code formats + */ +typedef enum CUjitInputType_enum +{ + /** + * Compiled device-class-specific device code\n + * Applicable options: none + */ + CU_JIT_INPUT_CUBIN = 0, + + /** + * PTX source code\n + * Applicable options: PTX compiler options + */ + CU_JIT_INPUT_PTX = 1, + + /** + * Bundle of multiple cubins and/or PTX of some device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_FATBINARY = 2, + + /** + * Host object with embedded device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_OBJECT = 3, + + /** + * Archive of host objects with embedded device code\n + * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY + */ + CU_JIT_INPUT_LIBRARY = 4, + + /** + * \deprecated + * High-level intermediate code for link-time optimization\n + * Applicable options: NVVM compiler options, PTX compiler options + * + * Only valid with LTO-IR compiled with toolkits prior to CUDA 12.0 + */ + CU_JIT_INPUT_NVVM = 5, + + CU_JIT_NUM_INPUT_TYPES = 6 +} CUjitInputType; + +typedef struct CUlinkState_st *CUlinkState; + +/** + * Flags to register a graphics resource + */ +typedef enum CUgraphicsRegisterFlags_enum { + CU_GRAPHICS_REGISTER_FLAGS_NONE = 0x00, + CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 0x01, + CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 0x02, + CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 0x04, + CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 0x08 +} CUgraphicsRegisterFlags; + +/** + * Flags for mapping and unmapping interop resources + */ +typedef enum CUgraphicsMapResourceFlags_enum { + CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00, + CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01, + CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02 +} CUgraphicsMapResourceFlags; + +/** + * Array indices for cube faces + */ +typedef enum CUarray_cubemap_face_enum { + CU_CUBEMAP_FACE_POSITIVE_X = 0x00, /**< Positive X face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_X = 0x01, /**< Negative X face of cubemap */ + CU_CUBEMAP_FACE_POSITIVE_Y = 0x02, /**< Positive Y face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_Y = 0x03, /**< Negative Y face of cubemap */ + CU_CUBEMAP_FACE_POSITIVE_Z = 0x04, /**< Positive Z face of cubemap */ + CU_CUBEMAP_FACE_NEGATIVE_Z = 0x05 /**< Negative Z face of cubemap */ +} CUarray_cubemap_face; + +/** + * Limits + */ +typedef enum CUlimit_enum { + CU_LIMIT_STACK_SIZE = 0x00, /**< GPU thread stack size */ + CU_LIMIT_PRINTF_FIFO_SIZE = 0x01, /**< GPU printf FIFO size */ + CU_LIMIT_MALLOC_HEAP_SIZE = 0x02, /**< GPU malloc heap size */ + CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 0x03, /**< GPU device runtime launch synchronize depth */ + CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 0x04, /**< GPU device runtime pending launch count */ + CU_LIMIT_MAX_L2_FETCH_GRANULARITY 
= 0x05, /**< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint */ + CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 0x06, /**< A size in bytes for L2 persisting lines cache size */ + CU_LIMIT_MAX +} CUlimit; + +/** + * Resource types + */ +typedef enum CUresourcetype_enum { + CU_RESOURCE_TYPE_ARRAY = 0x00, /**< Array resource */ + CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, /**< Mipmapped array resource */ + CU_RESOURCE_TYPE_LINEAR = 0x02, /**< Linear resource */ + CU_RESOURCE_TYPE_PITCH2D = 0x03 /**< Pitch 2D resource */ +} CUresourcetype; + +#ifdef _WIN32 +#define CUDA_CB __stdcall +#else +#define CUDA_CB +#endif + +/** + * CUDA host function + * \param userData Argument value passed to the function + */ +typedef void (CUDA_CB *CUhostFn)(void *userData); + +/** + * Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members. + */ +typedef enum CUaccessProperty_enum { + CU_ACCESS_PROPERTY_NORMAL = 0, /**< Normal cache persistence. */ + CU_ACCESS_PROPERTY_STREAMING = 1, /**< Streaming access is less likely to persist from cache. */ + CU_ACCESS_PROPERTY_PERSISTING = 2 /**< Persisting access is more likely to persist in cache. */ +} CUaccessProperty; + +/** + * Specifies an access policy for a window, a contiguous extent of memory + * beginning at base_ptr and ending at base_ptr + num_bytes. + * num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. + * Partition into many segments and assign segments such that: + * sum of "hit segments" / window == approx. ratio. + * sum of "miss segments" / window == approx. 1-ratio. + * Segments and ratio specifications are fitted to the capabilities of + * the architecture. + * Accesses in a hit segment apply the hitProp access policy. + * Accesses in a miss segment apply the missProp access policy. + */ +typedef struct CUaccessPolicyWindow_st { + void *base_ptr; /**< Starting address of the access policy window. CUDA driver may align it. */ + size_t num_bytes; /**< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. */ + float hitRatio; /**< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. */ + CUaccessProperty hitProp; /**< ::CUaccessProperty set for hit. */ + CUaccessProperty missProp; /**< ::CUaccessProperty set for miss.
Must be either NORMAL or STREAMING */ +} CUaccessPolicyWindow_v1; +/** + * Access policy window + */ +typedef CUaccessPolicyWindow_v1 CUaccessPolicyWindow; + +/** + * GPU kernel node parameters + */ +typedef struct CUDA_KERNEL_NODE_PARAMS_st { + CUfunction func; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + void **kernelParams; /**< Array of pointers to kernel parameters */ + void **extra; /**< Extra options */ +} CUDA_KERNEL_NODE_PARAMS_v1; + +/** + * GPU kernel node parameters + */typedef struct CUDA_KERNEL_NODE_PARAMS_v2_st { + CUfunction func; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + void **kernelParams; /**< Array of pointers to kernel parameters */ + void **extra; /**< Extra options */ + CUkernel kern; /**< Kernel to launch, will only be referenced if func is NULL */ + CUcontext ctx; /**< Context for the kernel task to run in. The value NULL will indicate the current context should be used by the api. This field is ignored if func is set. */ +} CUDA_KERNEL_NODE_PARAMS_v2; +typedef CUDA_KERNEL_NODE_PARAMS_v2 CUDA_KERNEL_NODE_PARAMS; + +/** + * Memset node parameters + */ +typedef struct CUDA_MEMSET_NODE_PARAMS_st { + CUdeviceptr dst; /**< Destination device pointer */ + size_t pitch; /**< Pitch of destination device pointer. Unused if height is 1 */ + unsigned int value; /**< Value to be set */ + unsigned int elementSize; /**< Size of each element in bytes. Must be 1, 2, or 4. 
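+ *
+ * A hedged sketch of filling these parameters for a one-row memset graph node
+ * (`dptr`, `n`, `hGraph` and `ctx` are assumed to exist and be valid):
+ * \code
+ *   CUDA_MEMSET_NODE_PARAMS mp = {0};
+ *   mp.dst         = dptr;        // device buffer holding n 4-byte elements
+ *   mp.elementSize = 4;
+ *   mp.value       = 0;
+ *   mp.width       = n;
+ *   mp.height      = 1;           // pitch is ignored when height == 1
+ *   CUgraphNode node;
+ *   cuGraphAddMemsetNode(&node, hGraph, NULL, 0, &mp, ctx);
+ * \endcode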
*/ + size_t width; /**< Width of the row in elements */ + size_t height; /**< Number of rows */ +} CUDA_MEMSET_NODE_PARAMS_v1; +typedef CUDA_MEMSET_NODE_PARAMS_v1 CUDA_MEMSET_NODE_PARAMS; + +/** + * Host node parameters + */ +typedef struct CUDA_HOST_NODE_PARAMS_st { + CUhostFn fn; /**< The function to call when the node executes */ + void* userData; /**< Argument to pass to the function */ +} CUDA_HOST_NODE_PARAMS_v1; +typedef CUDA_HOST_NODE_PARAMS_v1 CUDA_HOST_NODE_PARAMS; + +/** + * Graph node types + */ +typedef enum CUgraphNodeType_enum { + CU_GRAPH_NODE_TYPE_KERNEL = 0, /**< GPU kernel node */ + CU_GRAPH_NODE_TYPE_MEMCPY = 1, /**< Memcpy node */ + CU_GRAPH_NODE_TYPE_MEMSET = 2, /**< Memset node */ + CU_GRAPH_NODE_TYPE_HOST = 3, /**< Host (executable) node */ + CU_GRAPH_NODE_TYPE_GRAPH = 4, /**< Node which executes an embedded graph */ + CU_GRAPH_NODE_TYPE_EMPTY = 5, /**< Empty (no-op) node */ + CU_GRAPH_NODE_TYPE_WAIT_EVENT = 6, /**< External event wait node */ + CU_GRAPH_NODE_TYPE_EVENT_RECORD = 7, /**< External event record node */ + CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = 8, /**< External semaphore signal node */ + CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = 9, /**< External semaphore wait node */ + CU_GRAPH_NODE_TYPE_MEM_ALLOC = 10,/**< Memory Allocation Node */ + CU_GRAPH_NODE_TYPE_MEM_FREE = 11,/**< Memory Free Node */ + CU_GRAPH_NODE_TYPE_BATCH_MEM_OP = 12 /**< Batch MemOp Node */ +} CUgraphNodeType; + +/** + * Graph instantiation results +*/ +typedef enum CUgraphInstantiateResult_enum +{ + CUDA_GRAPH_INSTANTIATE_SUCCESS = 0, /**< Instantiation succeeded */ + CUDA_GRAPH_INSTANTIATE_ERROR = 1, /**< Instantiation failed for an unexpected reason which is described in the return value of the function */ + CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE = 2, /**< Instantiation failed due to invalid structure, such as cycles */ + CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED = 3, /**< Instantiation for device launch failed because the graph contained an unsupported operation */ + CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED = 4 /**< Instantiation for device launch failed due to the nodes belonging to different contexts */ +} CUgraphInstantiateResult; + +/** + * Graph instantiation parameters + */ +typedef struct CUDA_GRAPH_INSTANTIATE_PARAMS_st +{ + cuuint64_t flags; /**< Instantiation flags */ + CUstream hUploadStream; /**< Upload stream */ + CUgraphNode hErrNode_out; /**< The node which caused instantiation to fail, if any */ + CUgraphInstantiateResult result_out; /**< Whether instantiation was successful. If it failed, the reason why */ +} CUDA_GRAPH_INSTANTIATE_PARAMS; + +typedef enum CUsynchronizationPolicy_enum { + CU_SYNC_POLICY_AUTO = 1, + CU_SYNC_POLICY_SPIN = 2, + CU_SYNC_POLICY_YIELD = 3, + CU_SYNC_POLICY_BLOCKING_SYNC = 4 +} CUsynchronizationPolicy; + +/** + * Cluster scheduling policies. 
These may be passed to ::cuFuncSetAttribute or ::cuKernelSetAttribute + */ +typedef enum CUclusterSchedulingPolicy_enum { + CU_CLUSTER_SCHEDULING_POLICY_DEFAULT = 0, /**< the default policy */ + CU_CLUSTER_SCHEDULING_POLICY_SPREAD = 1, /**< spread the blocks within a cluster to the SMs */ + CU_CLUSTER_SCHEDULING_POLICY_LOAD_BALANCING = 2 /**< allow the hardware to load-balance the blocks in a cluster to the SMs */ +} CUclusterSchedulingPolicy; + +typedef enum CUlaunchMemSyncDomain_enum { + CU_LAUNCH_MEM_SYNC_DOMAIN_DEFAULT = 0, + CU_LAUNCH_MEM_SYNC_DOMAIN_REMOTE = 1 +} CUlaunchMemSyncDomain; + +typedef struct CUlaunchMemSyncDomainMap_st { + unsigned char default_; + unsigned char remote; +} CUlaunchMemSyncDomainMap; + +typedef enum CUlaunchAttributeID_enum { + CU_LAUNCH_ATTRIBUTE_IGNORE = 0 /**< Ignored entry, for convenient composition */ + , CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1 /**< Valid for streams, graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3 /**< Valid for streams. */ + , CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5 /**< Valid for graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6 /**< Valid for launches. Setting + programmaticStreamSerializationAllowed to non-0 + signals that the kernel will use programmatic + means to resolve its stream dependency, so that + the CUDA runtime should opportunistically allow + the grid's execution to overlap with the previous + kernel in the stream, if that kernel requests the + overlap. The dependent launches can choose to wait + on the dependency using the programmatic sync + (cudaGridDependencySynchronize() or equivalent PTX + instructions). */ + , CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7 /**< Valid for launches. Event recorded through this + launch attribute is guaranteed to only trigger + after all block in the associated kernel trigger + the event. A block can trigger the event through + PTX launchdep.release or CUDA builtin function + cudaTriggerProgrammaticLaunchCompletion(). A + trigger can also be inserted at the beginning of + each block's execution if triggerAtBlockStart is + set to non-0. The dependent launches can choose to + wait on the dependency using the programmatic sync + (cudaGridDependencySynchronize() or equivalent PTX + instructions). Note that dependents (including the + CPU thread calling cuEventSynchronize()) are not + guaranteed to observe the release precisely when + it is released. For example, cuEventSynchronize() + may only observe the event trigger long after the + associated kernel has completed. This recording + type is primarily meant for establishing + programmatic dependency between device tasks. The + event supplied must not be an interprocess or + interop event. The event must disable timing (i.e. + created with ::CU_EVENT_DISABLE_TIMING flag set). + */ + , CU_LAUNCH_ATTRIBUTE_PRIORITY = 8 /**< Valid for streams, graph nodes, launches. */ + , CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP = 9 + , CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN = 10 +#ifdef __CUDA_API_VERSION_INTERNAL + , CU_LAUNCH_ATTRIBUTE_MAX +#endif +} CUlaunchAttributeID; + +typedef union CUlaunchAttributeValue_union { + char pad[64]; /**< Pad to 64 bytes */ + CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. 
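+ *
+ * A hedged sketch of attaching an access policy window to a launch (`dptr`, `nbytes`,
+ * `stream`, `f`, `args` and the grid/block sizes are assumed to be valid; the
+ * ::CUlaunchConfig type used here is defined below):
+ * \code
+ *   CUlaunchAttribute attr;
+ *   attr.id = CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW;
+ *   attr.value.accessPolicyWindow.base_ptr  = (void *)dptr;
+ *   attr.value.accessPolicyWindow.num_bytes = nbytes;
+ *   attr.value.accessPolicyWindow.hitRatio  = 0.6f;
+ *   attr.value.accessPolicyWindow.hitProp   = CU_ACCESS_PROPERTY_PERSISTING;
+ *   attr.value.accessPolicyWindow.missProp  = CU_ACCESS_PROPERTY_STREAMING;
+ *   CUlaunchConfig cfg = { gridX, 1, 1, blockX, 1, 1, 0, stream, &attr, 1 };
+ *   cuLaunchKernelEx(&cfg, f, args, NULL);
+ * \endcode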
*/ + int cooperative; /**< Nonzero indicates a cooperative kernel (see ::cuLaunchCooperativeKernel). */ + CUsynchronizationPolicy syncPolicy; /**< ::CUsynchronizationPolicy for work queued up in this stream */ + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; /**< Cluster dimensions for the kernel node. */ + CUclusterSchedulingPolicy clusterSchedulingPolicyPreference; /**< Cluster scheduling policy preference for the kernel node. */ + int programmaticStreamSerializationAllowed; + struct { + CUevent event; + int flags; /* Does not accept ::CU_EVENT_RECORD_EXTERNAL */ + int triggerAtBlockStart; + } programmaticEvent; + int priority; /**< Execution priority of the kernel. */ + CUlaunchMemSyncDomainMap memSyncDomainMap; + CUlaunchMemSyncDomain memSyncDomain; +} CUlaunchAttributeValue; + +typedef struct CUlaunchAttribute_st { + CUlaunchAttributeID id; + char pad[8 - sizeof(CUlaunchAttributeID)]; + CUlaunchAttributeValue value; +} CUlaunchAttribute; + +typedef struct CUlaunchConfig_st { + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + CUstream hStream; /**< Stream identifier */ + CUlaunchAttribute *attrs; /**< nullable if numAttrs == 0 */ + unsigned int numAttrs; /**< number of attributes populated in attrs */ +} CUlaunchConfig; + +typedef CUlaunchAttributeID CUkernelNodeAttrID; +#define CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW +#define CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE CU_LAUNCH_ATTRIBUTE_COOPERATIVE +#define CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_DIMENSION CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION +#define CU_KERNEL_NODE_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE +#define CU_KERNEL_NODE_ATTRIBUTE_PRIORITY CU_LAUNCH_ATTRIBUTE_PRIORITY +#define CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +#define CU_KERNEL_NODE_ATTRIBUTE_MEM_SYNC_DOMAIN CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + +typedef CUlaunchAttributeValue CUkernelNodeAttrValue_v1; +typedef CUkernelNodeAttrValue_v1 CUkernelNodeAttrValue; + +/** + * Possible stream capture statuses returned by ::cuStreamIsCapturing + */ +typedef enum CUstreamCaptureStatus_enum { + CU_STREAM_CAPTURE_STATUS_NONE = 0, /**< Stream is not capturing */ + CU_STREAM_CAPTURE_STATUS_ACTIVE = 1, /**< Stream is actively capturing */ + CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2 /**< Stream is part of a capture sequence that + has been invalidated, but not terminated */ +} CUstreamCaptureStatus; + +/** + * Possible modes for stream capture thread interactions. 
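+ * (A hedged capture sketch, assuming `stream` is a valid non-default ::CUstream:
+ * \code
+ *   CUgraph graph;
+ *   cuStreamBeginCapture(stream, CU_STREAM_CAPTURE_MODE_GLOBAL);
+ *   // ... enqueue the work to be captured into `stream` ...
+ *   cuStreamEndCapture(stream, &graph);
+ * \endcode
+ * The captured graph can then be instantiated and launched.)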
For more details see + * ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode + */ +typedef enum CUstreamCaptureMode_enum { + CU_STREAM_CAPTURE_MODE_GLOBAL = 0, + CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1, + CU_STREAM_CAPTURE_MODE_RELAXED = 2 +} CUstreamCaptureMode; + +typedef CUlaunchAttributeID CUstreamAttrID; +#define CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW +#define CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY +#define CU_STREAM_ATTRIBUTE_PRIORITY CU_LAUNCH_ATTRIBUTE_PRIORITY +#define CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN_MAP +#define CU_STREAM_ATTRIBUTE_MEM_SYNC_DOMAIN CU_LAUNCH_ATTRIBUTE_MEM_SYNC_DOMAIN + +typedef CUlaunchAttributeValue CUstreamAttrValue_v1; +typedef CUstreamAttrValue_v1 CUstreamAttrValue; + +/** + * Flags to specify search options. For more details see ::cuGetProcAddress + */ +typedef enum CUdriverProcAddress_flags_enum { + CU_GET_PROC_ADDRESS_DEFAULT = 0, /**< Default search mode for driver symbols. */ + CU_GET_PROC_ADDRESS_LEGACY_STREAM = 1 << 0, /**< Search for legacy versions of driver symbols. */ + CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = 1 << 1 /**< Search for per-thread versions of driver symbols. */ +} CUdriverProcAddress_flags; + +/** + * Flags to indicate search status. For more details see ::cuGetProcAddress + */ +typedef enum CUdriverProcAddressQueryResult_enum { + CU_GET_PROC_ADDRESS_SUCCESS = 0, /**< Symbol was succesfully found */ + CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND = 1, /**< Symbol was not found in search */ + CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT = 2 /**< Symbol was found but version supplied was not sufficient */ +} CUdriverProcAddressQueryResult; + +/** + * Execution Affinity Types + */ +typedef enum CUexecAffinityType_enum { + CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0, /**< Create a context with limited SMs. */ + CU_EXEC_AFFINITY_TYPE_MAX +} CUexecAffinityType; + +/** + * Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT + */ +typedef struct CUexecAffinitySmCount_st { + unsigned int val; /**< The number of SMs the context is limited to use. */ +} CUexecAffinitySmCount_v1; +typedef CUexecAffinitySmCount_v1 CUexecAffinitySmCount; + +/** + * Execution Affinity Parameters + */ +typedef struct CUexecAffinityParam_st { + CUexecAffinityType type; + union { + CUexecAffinitySmCount smCount; /** Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT */ + } param; +} CUexecAffinityParam_v1; +/** + * Execution Affinity Parameters + */ +typedef CUexecAffinityParam_v1 CUexecAffinityParam; + +/** + * Library options to be specified with ::cuLibraryLoadData() or ::cuLibraryLoadFromFile() + */ +typedef enum CUlibraryOption_enum +{ + CU_LIBRARY_HOST_UNIVERSAL_FUNCTION_AND_DATA_TABLE = 0, + + /** + * Specifes that the argument \p code passed to ::cuLibraryLoadData() will be preserved. + * Specifying this option will let the driver know that \p code can be accessed at any point + * until ::cuLibraryUnload(). The default behavior is for the driver to allocate and + * maintain its own copy of \p code. Note that this is only a memory usage optimization + * hint and the driver can choose to ignore it if required. + * Specifying this option with ::cuLibraryLoadFromFile() is invalid and + * will return ::CUDA_ERROR_INVALID_VALUE. 
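+ *
+ * A hedged sketch, assuming `image` remains resident for the library's lifetime;
+ * the option's value slot is left NULL here:
+ * \code
+ *   CUlibraryOption lopt    = CU_LIBRARY_BINARY_IS_PRESERVED;
+ *   void           *loptVal = NULL;
+ *   CUlibrary       lib;
+ *   cuLibraryLoadData(&lib, image, NULL, NULL, 0, &lopt, &loptVal, 1);
+ * \endcode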
+ */ + CU_LIBRARY_BINARY_IS_PRESERVED = 1, + + CU_LIBRARY_NUM_OPTIONS +} CUlibraryOption; + +typedef struct CUlibraryHostUniversalFunctionAndDataTable_st +{ + void *functionTable; + size_t functionWindowSize; + void *dataTable; + size_t dataWindowSize; +} CUlibraryHostUniversalFunctionAndDataTable; + +/** + * Error codes + */ +typedef enum cudaError_enum { + /** + * The API call returned with no errors. In the case of query calls, this + * also means that the operation being queried is complete (see + * ::cuEventQuery() and ::cuStreamQuery()). + */ + CUDA_SUCCESS = 0, + + /** + * This indicates that one or more of the parameters passed to the API call + * is not within an acceptable range of values. + */ + CUDA_ERROR_INVALID_VALUE = 1, + + /** + * The API call failed because it was unable to allocate enough memory to + * perform the requested operation. + */ + CUDA_ERROR_OUT_OF_MEMORY = 2, + + /** + * This indicates that the CUDA driver has not been initialized with + * ::cuInit() or that initialization has failed. + */ + CUDA_ERROR_NOT_INITIALIZED = 3, + + /** + * This indicates that the CUDA driver is in the process of shutting down. + */ + CUDA_ERROR_DEINITIALIZED = 4, + + /** + * This indicates profiler is not initialized for this run. This can + * happen when the application is running with external profiling tools + * like visual profiler. + */ + CUDA_ERROR_PROFILER_DISABLED = 5, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to attempt to enable/disable the profiling via ::cuProfilerStart or + * ::cuProfilerStop without initialization. + */ + CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cuProfilerStart() when profiling is already enabled. + */ + CUDA_ERROR_PROFILER_ALREADY_STARTED = 7, + + /** + * \deprecated + * This error return is deprecated as of CUDA 5.0. It is no longer an error + * to call cuProfilerStop() when profiling is already disabled. + */ + CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8, + + /** + * This indicates that the CUDA driver that the application has loaded is a + * stub library. Applications that run with the stub rather than a real + * driver loaded will result in CUDA API returning this error. + */ + CUDA_ERROR_STUB_LIBRARY = 34, + + /** + * This indicates that requested CUDA device is unavailable at the current + * time. Devices are often unavailable due to use of + * ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS or ::CU_COMPUTEMODE_PROHIBITED. + */ + CUDA_ERROR_DEVICE_UNAVAILABLE = 46, + + /** + * This indicates that no CUDA-capable devices were detected by the installed + * CUDA driver. + */ + CUDA_ERROR_NO_DEVICE = 100, + + /** + * This indicates that the device ordinal supplied by the user does not + * correspond to a valid CUDA device or that the action requested is + * invalid for the specified device. + */ + CUDA_ERROR_INVALID_DEVICE = 101, + + /** + * This error indicates that the Grid license is not applied. + */ + CUDA_ERROR_DEVICE_NOT_LICENSED = 102, + + /** + * This indicates that the device kernel image is invalid. This can also + * indicate an invalid CUDA module. + */ + CUDA_ERROR_INVALID_IMAGE = 200, + + /** + * This most frequently indicates that there is no context bound to the + * current thread. This can also be returned if the context passed to an + * API call is not a valid handle (such as a context that has had + * ::cuCtxDestroy() invoked on it). 
This can also be returned if a user + * mixes different API versions (i.e. 3010 context with 3020 API calls). + * See ::cuCtxGetApiVersion() for more details. + */ + CUDA_ERROR_INVALID_CONTEXT = 201, + + /** + * This indicated that the context being supplied as a parameter to the + * API call was already the active context. + * \deprecated + * This error return is deprecated as of CUDA 3.2. It is no longer an + * error to attempt to push the active context via ::cuCtxPushCurrent(). + */ + CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202, + + /** + * This indicates that a map or register operation has failed. + */ + CUDA_ERROR_MAP_FAILED = 205, + + /** + * This indicates that an unmap or unregister operation has failed. + */ + CUDA_ERROR_UNMAP_FAILED = 206, + + /** + * This indicates that the specified array is currently mapped and thus + * cannot be destroyed. + */ + CUDA_ERROR_ARRAY_IS_MAPPED = 207, + + /** + * This indicates that the resource is already mapped. + */ + CUDA_ERROR_ALREADY_MAPPED = 208, + + /** + * This indicates that there is no kernel image available that is suitable + * for the device. This can occur when a user specifies code generation + * options for a particular CUDA source file that do not include the + * corresponding device configuration. + */ + CUDA_ERROR_NO_BINARY_FOR_GPU = 209, + + /** + * This indicates that a resource has already been acquired. + */ + CUDA_ERROR_ALREADY_ACQUIRED = 210, + + /** + * This indicates that a resource is not mapped. + */ + CUDA_ERROR_NOT_MAPPED = 211, + + /** + * This indicates that a mapped resource is not available for access as an + * array. + */ + CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212, + + /** + * This indicates that a mapped resource is not available for access as a + * pointer. + */ + CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213, + + /** + * This indicates that an uncorrectable ECC error was detected during + * execution. + */ + CUDA_ERROR_ECC_UNCORRECTABLE = 214, + + /** + * This indicates that the ::CUlimit passed to the API call is not + * supported by the active device. + */ + CUDA_ERROR_UNSUPPORTED_LIMIT = 215, + + /** + * This indicates that the ::CUcontext passed to the API call can + * only be bound to a single CPU thread at a time but is already + * bound to a CPU thread. + */ + CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216, + + /** + * This indicates that peer access is not supported across the given + * devices. + */ + CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217, + + /** + * This indicates that a PTX JIT compilation failed. + */ + CUDA_ERROR_INVALID_PTX = 218, + + /** + * This indicates an error with OpenGL or DirectX context. + */ + CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219, + + /** + * This indicates that an uncorrectable NVLink error was detected during the + * execution. + */ + CUDA_ERROR_NVLINK_UNCORRECTABLE = 220, + + /** + * This indicates that the PTX JIT compiler library was not found. + */ + CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221, + + /** + * This indicates that the provided PTX was compiled with an unsupported toolchain. + */ + + CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222, + + /** + * This indicates that the PTX JIT compilation was disabled. + */ + CUDA_ERROR_JIT_COMPILATION_DISABLED = 223, + + /** + * This indicates that the ::CUexecAffinityType passed to the API call is not + * supported by the active device. + */ + CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = 224, + + /** + * This indicates that the code to be compiled by the PTX JIT contains + * unsupported call to cudaDeviceSynchronize. 
+ */ + CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC = 225, + + /** + * This indicates that the device kernel source is invalid. This includes + * compilation/linker errors encountered in device code or user error. + */ + CUDA_ERROR_INVALID_SOURCE = 300, + + /** + * This indicates that the file specified was not found. + */ + CUDA_ERROR_FILE_NOT_FOUND = 301, + + /** + * This indicates that a link to a shared object failed to resolve. + */ + CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302, + + /** + * This indicates that initialization of a shared object failed. + */ + CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303, + + /** + * This indicates that an OS call failed. + */ + CUDA_ERROR_OPERATING_SYSTEM = 304, + + /** + * This indicates that a resource handle passed to the API call was not + * valid. Resource handles are opaque types like ::CUstream and ::CUevent. + */ + CUDA_ERROR_INVALID_HANDLE = 400, + + /** + * This indicates that a resource required by the API call is not in a + * valid state to perform the requested operation. + */ + CUDA_ERROR_ILLEGAL_STATE = 401, + + /** + * This indicates that a named symbol was not found. Examples of symbols + * are global/constant variable names, driver function names, texture names, + * and surface names. + */ + CUDA_ERROR_NOT_FOUND = 500, + + /** + * This indicates that asynchronous operations issued previously have not + * completed yet. This result is not actually an error, but must be indicated + * differently than ::CUDA_SUCCESS (which indicates completion). Calls that + * may return this value include ::cuEventQuery() and ::cuStreamQuery(). + */ + CUDA_ERROR_NOT_READY = 600, + + /** + * While executing a kernel, the device encountered a + * load or store instruction on an invalid memory address. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_ILLEGAL_ADDRESS = 700, + + /** + * This indicates that a launch did not occur because it did not have + * appropriate resources. This error usually indicates that the user has + * attempted to pass too many arguments to the device kernel, or the + * kernel launch specifies too many threads for the kernel's register + * count. Passing arguments of the wrong size (i.e. a 64-bit pointer + * when a 32-bit int is expected) is equivalent to passing too many + * arguments and can also result in this error. + */ + CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701, + + /** + * This indicates that the device kernel took too long to execute. This can + * only occur if timeouts are enabled - see the device attribute + * ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_LAUNCH_TIMEOUT = 702, + + /** + * This error indicates a kernel launch that uses an incompatible texturing + * mode. + */ + CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703, + + /** + * This error indicates that a call to ::cuCtxEnablePeerAccess() is + * trying to re-enable peer access to a context which has already + * had peer access to it enabled. + */ + CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704, + + /** + * This error indicates that ::cuCtxDisablePeerAccess() is + * trying to disable peer access which has not been enabled yet + * via ::cuCtxEnablePeerAccess(). 
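+ *
+ * A hedged sketch of the enable/disable pairing this error guards (`devA`/`devB` are the
+ * two ::CUdevice handles, `ctxA` is assumed current and `ctxB` belongs to the peer device):
+ * \code
+ *   int can = 0;
+ *   cuDeviceCanAccessPeer(&can, devA, devB);
+ *   if (can) {
+ *       cuCtxEnablePeerAccess(ctxB, 0);   // flags must be 0
+ *       // ... peer accesses ...
+ *       cuCtxDisablePeerAccess(ctxB);     // disabling before enabling returns 705
+ *   }
+ * \endcode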
+ */ + CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705, + + /** + * This error indicates that the primary context for the specified device + * has already been initialized. + */ + CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708, + + /** + * This error indicates that the context current to the calling thread + * has been destroyed using ::cuCtxDestroy, or is a primary context which + * has not yet been initialized. + */ + CUDA_ERROR_CONTEXT_IS_DESTROYED = 709, + + /** + * A device-side assert triggered during kernel execution. The context + * cannot be used anymore, and must be destroyed. All existing device + * memory allocations from this context are invalid and must be + * reconstructed if the program is to continue using CUDA. + */ + CUDA_ERROR_ASSERT = 710, + + /** + * This error indicates that the hardware resources required to enable + * peer access have been exhausted for one or more of the devices + * passed to ::cuCtxEnablePeerAccess(). + */ + CUDA_ERROR_TOO_MANY_PEERS = 711, + + /** + * This error indicates that the memory range passed to ::cuMemHostRegister() + * has already been registered. + */ + CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712, + + /** + * This error indicates that the pointer passed to ::cuMemHostUnregister() + * does not correspond to any currently registered memory region. + */ + CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713, + + /** + * While executing a kernel, the device encountered a stack error. + * This can be due to stack corruption or exceeding the stack size limit. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_HARDWARE_STACK_ERROR = 714, + + /** + * While executing a kernel, the device encountered an illegal instruction. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_ILLEGAL_INSTRUCTION = 715, + + /** + * While executing a kernel, the device encountered a load or store instruction + * on a memory address which is not aligned. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_MISALIGNED_ADDRESS = 716, + + /** + * While executing a kernel, the device encountered an instruction + * which can only operate on memory locations in certain address spaces + * (global, shared, or local), but was supplied a memory address not + * belonging to an allowed address space. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_INVALID_ADDRESS_SPACE = 717, + + /** + * While executing a kernel, the device program counter wrapped its address space. + * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_INVALID_PC = 718, + + /** + * An exception occurred on the device while executing a kernel. Common + * causes include dereferencing an invalid device pointer and accessing + * out of bounds shared memory. Less common cases can be system specific - more + * information about these cases can be found in the system specific user guide. 
+ * This leaves the process in an inconsistent state and any further CUDA work + * will return the same error. To continue using CUDA, the process must be terminated + * and relaunched. + */ + CUDA_ERROR_LAUNCH_FAILED = 719, + + /** + * This error indicates that the number of blocks launched per grid for a kernel that was + * launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice + * exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor + * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors + * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. + */ + CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720, + + /** + * This error indicates that the attempted operation is not permitted. + */ + CUDA_ERROR_NOT_PERMITTED = 800, + + /** + * This error indicates that the attempted operation is not supported + * on the current system or device. + */ + CUDA_ERROR_NOT_SUPPORTED = 801, + + /** + * This error indicates that the system is not yet ready to start any CUDA + * work. To continue using CUDA, verify the system configuration is in a + * valid state and all required driver daemons are actively running. + * More information about this error can be found in the system specific + * user guide. + */ + CUDA_ERROR_SYSTEM_NOT_READY = 802, + + /** + * This error indicates that there is a mismatch between the versions of + * the display driver and the CUDA driver. Refer to the compatibility documentation + * for supported versions. + */ + CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803, + + /** + * This error indicates that the system was upgraded to run with forward compatibility + * but the visible hardware detected by CUDA does not support this configuration. + * Refer to the compatibility documentation for the supported hardware matrix or ensure + * that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES + * environment variable. + */ + CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804, + + /** + * This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. + */ + CUDA_ERROR_MPS_CONNECTION_FAILED = 805, + + /** + * This error indicates that the remote procedural call between the MPS server and the MPS client failed. + */ + CUDA_ERROR_MPS_RPC_FAILURE = 806, + + /** + * This error indicates that the MPS server is not ready to accept new MPS client requests. + * This error can be returned when the MPS server is in the process of recovering from a fatal failure. + */ + CUDA_ERROR_MPS_SERVER_NOT_READY = 807, + + /** + * This error indicates that the hardware resources required to create MPS client have been exhausted. + */ + CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = 808, + + /** + * This error indicates the the hardware resources required to support device connections have been exhausted. + */ + CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = 809, + + /** + * This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched. + */ + CUDA_ERROR_MPS_CLIENT_TERMINATED = 810, + + /** + * This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it. + */ + CUDA_ERROR_CDP_NOT_SUPPORTED = 811, + + /** + * This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism. 
+ */ + CUDA_ERROR_CDP_VERSION_MISMATCH = 812, + + /** + * This error indicates that the operation is not permitted when + * the stream is capturing. + */ + CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900, + + /** + * This error indicates that the current capture sequence on the stream + * has been invalidated due to a previous error. + */ + CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901, + + /** + * This error indicates that the operation would have resulted in a merge + * of two independent capture sequences. + */ + CUDA_ERROR_STREAM_CAPTURE_MERGE = 902, + + /** + * This error indicates that the capture was not initiated in this stream. + */ + CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903, + + /** + * This error indicates that the capture sequence contains a fork that was + * not joined to the primary stream. + */ + CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904, + + /** + * This error indicates that a dependency would have been created which + * crosses the capture sequence boundary. Only implicit in-stream ordering + * dependencies are allowed to cross the boundary. + */ + CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905, + + /** + * This error indicates a disallowed implicit dependency on a current capture + * sequence from cudaStreamLegacy. + */ + CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906, + + /** + * This error indicates that the operation is not permitted on an event which + * was last recorded in a capturing stream. + */ + CUDA_ERROR_CAPTURED_EVENT = 907, + + /** + * A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED + * argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a + * different thread. + */ + CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908, + + /** + * This error indicates that the timeout specified for the wait operation has lapsed. + */ + CUDA_ERROR_TIMEOUT = 909, + + /** + * This error indicates that the graph update was not performed because it included + * changes which violated constraints specific to instantiated graph update. + */ + CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910, + + /** + * This indicates that an async error has occurred in a device outside of CUDA. + * If CUDA was waiting for an external device's signal before consuming shared data, + * the external device signaled an error indicating that the data is not valid for + * consumption. This leaves the process in an inconsistent state and any further CUDA + * work will return the same error. To continue using CUDA, the process must be + * terminated and relaunched. + */ + CUDA_ERROR_EXTERNAL_DEVICE = 911, + + /** + * Indicates a kernel launch error due to cluster misconfiguration. + */ + CUDA_ERROR_INVALID_CLUSTER_SIZE = 912, + + /** + * This indicates that an unknown internal error has occurred. 
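+ *
+ * A hedged error-checking sketch usable with any of the codes above (assumes
+ * <stdio.h> is available; ::cuGetErrorName and ::cuGetErrorString translate a
+ * ::CUresult into its name and description):
+ * \code
+ *   #define DRV_CHECK(call)                                             \
+ *       do {                                                            \
+ *           CUresult err_ = (call);                                     \
+ *           if (err_ != CUDA_SUCCESS) {                                 \
+ *               const char *name_ = NULL, *desc_ = NULL;                \
+ *               cuGetErrorName(err_, &name_);                           \
+ *               cuGetErrorString(err_, &desc_);                         \
+ *               fprintf(stderr, "%s -> %s: %s\n", #call, name_, desc_); \
+ *           }                                                           \
+ *       } while (0)
+ * \endcode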
+ */ + CUDA_ERROR_UNKNOWN = 999 +} CUresult; + +/** + * P2P Attributes + */ +typedef enum CUdevice_P2PAttribute_enum { + CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 0x01, /**< A relative value indicating the performance of the link between two devices */ + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 0x02, /**< P2P Access is enable */ + CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 0x03, /**< Atomic operation over the link supported */ + CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 0x04, /**< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead */ + CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 0x04 /**< Accessing CUDA arrays over the link supported */ +} CUdevice_P2PAttribute; + +/** + * CUDA stream callback + * \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL. + * \param status ::CUDA_SUCCESS or any persistent error on the stream. + * \param userData User parameter provided at registration. + */ +typedef void (CUDA_CB *CUstreamCallback)(CUstream hStream, CUresult status, void *userData); + +/** + * Block size to per-block dynamic shared memory mapping for a certain + * kernel \param blockSize Block size of the kernel. + * + * \return The dynamic shared memory needed by a block. + */ +typedef size_t (CUDA_CB *CUoccupancyB2DSize)(int blockSize); + +/** + * If set, host memory is portable between CUDA contexts. + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_PORTABLE 0x01 + +/** + * If set, host memory is mapped into CUDA address space and + * ::cuMemHostGetDevicePointer() may be called on the host pointer. + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_DEVICEMAP 0x02 + +/** + * If set, host memory is allocated as write-combined - fast to write, + * faster to DMA, slow to read except via SSE4 streaming load instruction + * (MOVNTDQA). + * Flag for ::cuMemHostAlloc() + */ +#define CU_MEMHOSTALLOC_WRITECOMBINED 0x04 + +/** + * If set, host memory is portable between CUDA contexts. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_PORTABLE 0x01 + +/** + * If set, host memory is mapped into CUDA address space and + * ::cuMemHostGetDevicePointer() may be called on the host pointer. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_DEVICEMAP 0x02 + +/** + * If set, the passed memory pointer is treated as pointing to some + * memory-mapped I/O space, e.g. belonging to a third-party PCIe device. + * On Windows the flag is a no-op. + * On Linux that memory is marked as non cache-coherent for the GPU and + * is expected to be physically contiguous. It may return + * ::CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user, + * ::CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions. + * On all other platforms, it is not supported and ::CUDA_ERROR_NOT_SUPPORTED + * is returned. + * Flag for ::cuMemHostRegister() + */ +#define CU_MEMHOSTREGISTER_IOMEMORY 0x04 + +/** +* If set, the passed memory pointer is treated as pointing to memory that is +* considered read-only by the device. On platforms without +* ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is +* required in order to register memory mapped to the CPU as read-only. Support +* for the use of this flag can be queried from the device attribute +* ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. 
Using this flag with +* a current context associated with a device that does not have this attribute +* set will cause ::cuMemHostRegister to error with ::CUDA_ERROR_NOT_SUPPORTED. +*/ +#define CU_MEMHOSTREGISTER_READ_ONLY 0x08 + +/** + * 2D memory copy parameters + */ +typedef struct CUDA_MEMCPY2D_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + + size_t dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + + size_t WidthInBytes; /**< Width of 2D memory copy in bytes */ + size_t Height; /**< Height of 2D memory copy */ +} CUDA_MEMCPY2D_v2; +typedef CUDA_MEMCPY2D_v2 CUDA_MEMCPY2D; + +/** + * 3D memory copy parameters + */ +typedef struct CUDA_MEMCPY3D_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + size_t srcZ; /**< Source Z */ + size_t srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + void *reserved0; /**< Must be NULL */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + size_t dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + size_t dstZ; /**< Destination Z */ + size_t dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + void *reserved1; /**< Must be NULL */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ + size_t Height; /**< Height of 3D memory copy */ + size_t Depth; /**< Depth of 3D memory copy */ +} CUDA_MEMCPY3D_v2; +typedef CUDA_MEMCPY3D_v2 CUDA_MEMCPY3D; + +/** + * 3D memory cross-context copy parameters + */ +typedef struct CUDA_MEMCPY3D_PEER_st { + size_t srcXInBytes; /**< Source X in bytes */ + size_t srcY; /**< Source Y */ + size_t srcZ; /**< Source Z */ + size_t srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + CUcontext srcContext; /**< Source context (ignored with srcMemoryType is ::CU_MEMORYTYPE_ARRAY) */ + size_t srcPitch; /**< Source pitch (ignored when src is array) */ + size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + size_t 
dstXInBytes; /**< Destination X in bytes */ + size_t dstY; /**< Destination Y */ + size_t dstZ; /**< Destination Z */ + size_t dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + CUcontext dstContext; /**< Destination context (ignored with dstMemoryType is ::CU_MEMORYTYPE_ARRAY) */ + size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ + size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ + size_t Height; /**< Height of 3D memory copy */ + size_t Depth; /**< Depth of 3D memory copy */ +} CUDA_MEMCPY3D_PEER_v1; +typedef CUDA_MEMCPY3D_PEER_v1 CUDA_MEMCPY3D_PEER; + +/** + * Array descriptor + */ +typedef struct CUDA_ARRAY_DESCRIPTOR_st +{ + size_t Width; /**< Width of array */ + size_t Height; /**< Height of array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ +} CUDA_ARRAY_DESCRIPTOR_v2; +typedef CUDA_ARRAY_DESCRIPTOR_v2 CUDA_ARRAY_DESCRIPTOR; + +/** + * 3D array descriptor + */ +typedef struct CUDA_ARRAY3D_DESCRIPTOR_st +{ + size_t Width; /**< Width of 3D array */ + size_t Height; /**< Height of 3D array */ + size_t Depth; /**< Depth of 3D array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + unsigned int Flags; /**< Flags */ +} CUDA_ARRAY3D_DESCRIPTOR_v2; +typedef CUDA_ARRAY3D_DESCRIPTOR_v2 CUDA_ARRAY3D_DESCRIPTOR; + +/** + * Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers + */ +#define CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL 0x1 + +/** + * CUDA array sparse properties + */ +typedef struct CUDA_ARRAY_SPARSE_PROPERTIES_st { + struct { + unsigned int width; /**< Width of sparse tile in elements */ + unsigned int height; /**< Height of sparse tile in elements */ + unsigned int depth; /**< Depth of sparse tile in elements */ + } tileExtent; + + /** + * First mip level at which the mip tail begins. + */ + unsigned int miptailFirstLevel; + /** + * Total size of the mip tail. 
+ */ + unsigned long long miptailSize; + /** + * Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL + */ + unsigned int flags; + unsigned int reserved[4]; +} CUDA_ARRAY_SPARSE_PROPERTIES_v1; +typedef CUDA_ARRAY_SPARSE_PROPERTIES_v1 CUDA_ARRAY_SPARSE_PROPERTIES; + +/** + * CUDA array memory requirements + */ +typedef struct CUDA_ARRAY_MEMORY_REQUIREMENTS_st { + size_t size; /**< Total required memory size */ + size_t alignment; /**< alignment requirement */ + unsigned int reserved[4]; +} CUDA_ARRAY_MEMORY_REQUIREMENTS_v1; +typedef CUDA_ARRAY_MEMORY_REQUIREMENTS_v1 CUDA_ARRAY_MEMORY_REQUIREMENTS; + +/** + * CUDA Resource descriptor + */ +typedef struct CUDA_RESOURCE_DESC_st +{ + CUresourcetype resType; /**< Resource type */ + + union { + struct { + CUarray hArray; /**< CUDA array */ + } array; + struct { + CUmipmappedArray hMipmappedArray; /**< CUDA mipmapped array */ + } mipmap; + struct { + CUdeviceptr devPtr; /**< Device pointer */ + CUarray_format format; /**< Array format */ + unsigned int numChannels; /**< Channels per array element */ + size_t sizeInBytes; /**< Size in bytes */ + } linear; + struct { + CUdeviceptr devPtr; /**< Device pointer */ + CUarray_format format; /**< Array format */ + unsigned int numChannels; /**< Channels per array element */ + size_t width; /**< Width of the array in elements */ + size_t height; /**< Height of the array in elements */ + size_t pitchInBytes; /**< Pitch between two rows in bytes */ + } pitch2D; + struct { + int reserved[32]; + } reserved; + } res; + + unsigned int flags; /**< Flags (must be zero) */ +} CUDA_RESOURCE_DESC_v1; +typedef CUDA_RESOURCE_DESC_v1 CUDA_RESOURCE_DESC; + +/** + * Texture descriptor + */ +typedef struct CUDA_TEXTURE_DESC_st { + CUaddress_mode addressMode[3]; /**< Address modes */ + CUfilter_mode filterMode; /**< Filter mode */ + unsigned int flags; /**< Flags */ + unsigned int maxAnisotropy; /**< Maximum anisotropy ratio */ + CUfilter_mode mipmapFilterMode; /**< Mipmap filter mode */ + float mipmapLevelBias; /**< Mipmap level bias */ + float minMipmapLevelClamp; /**< Mipmap minimum level clamp */ + float maxMipmapLevelClamp; /**< Mipmap maximum level clamp */ + float borderColor[4]; /**< Border Color */ + int reserved[12]; +} CUDA_TEXTURE_DESC_v1; +typedef CUDA_TEXTURE_DESC_v1 CUDA_TEXTURE_DESC; + +/** + * Resource view format + */ +typedef enum CUresourceViewFormat_enum +{ + CU_RES_VIEW_FORMAT_NONE = 0x00, /**< No resource view format (use underlying resource format) */ + CU_RES_VIEW_FORMAT_UINT_1X8 = 0x01, /**< 1 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_2X8 = 0x02, /**< 2 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X8 = 0x03, /**< 4 channel unsigned 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X8 = 0x04, /**< 1 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X8 = 0x05, /**< 2 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X8 = 0x06, /**< 4 channel signed 8-bit integers */ + CU_RES_VIEW_FORMAT_UINT_1X16 = 0x07, /**< 1 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_2X16 = 0x08, /**< 2 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X16 = 0x09, /**< 4 channel unsigned 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X16 = 0x0a, /**< 1 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X16 = 0x0b, /**< 2 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X16 = 0x0c, /**< 4 channel signed 16-bit integers */ + CU_RES_VIEW_FORMAT_UINT_1X32 = 0x0d, /**< 1 channel unsigned 32-bit integers */ + 
CU_RES_VIEW_FORMAT_UINT_2X32 = 0x0e, /**< 2 channel unsigned 32-bit integers */ + CU_RES_VIEW_FORMAT_UINT_4X32 = 0x0f, /**< 4 channel unsigned 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_1X32 = 0x10, /**< 1 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_2X32 = 0x11, /**< 2 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_SINT_4X32 = 0x12, /**< 4 channel signed 32-bit integers */ + CU_RES_VIEW_FORMAT_FLOAT_1X16 = 0x13, /**< 1 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_2X16 = 0x14, /**< 2 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_4X16 = 0x15, /**< 4 channel 16-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_1X32 = 0x16, /**< 1 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_2X32 = 0x17, /**< 2 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_FLOAT_4X32 = 0x18, /**< 4 channel 32-bit floating point */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 0x19, /**< Block compressed 1 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 0x1a, /**< Block compressed 2 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 0x1b, /**< Block compressed 3 */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 0x1c, /**< Block compressed 4 unsigned */ + CU_RES_VIEW_FORMAT_SIGNED_BC4 = 0x1d, /**< Block compressed 4 signed */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 0x1e, /**< Block compressed 5 unsigned */ + CU_RES_VIEW_FORMAT_SIGNED_BC5 = 0x1f, /**< Block compressed 5 signed */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 0x20, /**< Block compressed 6 unsigned half-float */ + CU_RES_VIEW_FORMAT_SIGNED_BC6H = 0x21, /**< Block compressed 6 signed half-float */ + CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 0x22 /**< Block compressed 7 */ +} CUresourceViewFormat; + +/** + * Resource view descriptor + */ +typedef struct CUDA_RESOURCE_VIEW_DESC_st +{ + CUresourceViewFormat format; /**< Resource view format */ + size_t width; /**< Width of the resource view */ + size_t height; /**< Height of the resource view */ + size_t depth; /**< Depth of the resource view */ + unsigned int firstMipmapLevel; /**< First defined mipmap level */ + unsigned int lastMipmapLevel; /**< Last defined mipmap level */ + unsigned int firstLayer; /**< First layer index */ + unsigned int lastLayer; /**< Last layer index */ + unsigned int reserved[16]; +} CUDA_RESOURCE_VIEW_DESC_v1; +typedef CUDA_RESOURCE_VIEW_DESC_v1 CUDA_RESOURCE_VIEW_DESC; + +/** + * Size of tensor map descriptor + */ +#define CU_TENSOR_MAP_NUM_QWORDS 16 + +/** + * Tensor map descriptor. Requires compiler support for aligning to 64 bytes. 
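+ *
+ * A minimal, illustrative compile-time check (not part of the original
+ * header) that the requested 64-byte alignment is honored by the toolchain:
+ *
+ *   // C11:   _Static_assert(_Alignof(CUtensorMap) == 64, "CUtensorMap must be 64-byte aligned");
+ *   // C++11: static_assert(alignof(CUtensorMap) == 64, "CUtensorMap must be 64-byte aligned");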
+ */ +typedef struct CUtensorMap_st { +#if __cplusplus >= 201103L + alignas(64) +#elif __STDC_VERSION__ >= 201112L + _Alignas(64) +#endif + cuuint64_t opaque[CU_TENSOR_MAP_NUM_QWORDS]; +} CUtensorMap; + +/** + * Tensor map data type + */ +typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, + CU_TENSOR_MAP_DATA_TYPE_UINT16, + CU_TENSOR_MAP_DATA_TYPE_UINT32, + CU_TENSOR_MAP_DATA_TYPE_INT32, + CU_TENSOR_MAP_DATA_TYPE_UINT64, + CU_TENSOR_MAP_DATA_TYPE_INT64, + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ +} CUtensorMapDataType; + +/** + * Tensor map interleave layout type + */ +typedef enum CUtensorMapInterleave_enum { + CU_TENSOR_MAP_INTERLEAVE_NONE = 0, + CU_TENSOR_MAP_INTERLEAVE_16B, + CU_TENSOR_MAP_INTERLEAVE_32B +} CUtensorMapInterleave; + +/** + * Tensor map swizzling mode of shared memory banks + */ +typedef enum CUtensorMapSwizzle_enum { + CU_TENSOR_MAP_SWIZZLE_NONE = 0, + CU_TENSOR_MAP_SWIZZLE_32B, + CU_TENSOR_MAP_SWIZZLE_64B, + CU_TENSOR_MAP_SWIZZLE_128B +} CUtensorMapSwizzle; + +/** + * Tensor map L2 promotion type + */ +typedef enum CUtensorMapL2promotion_enum { + CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, + CU_TENSOR_MAP_L2_PROMOTION_L2_64B, + CU_TENSOR_MAP_L2_PROMOTION_L2_128B, + CU_TENSOR_MAP_L2_PROMOTION_L2_256B +} CUtensorMapL2promotion; + +/** + * Tensor map out-of-bounds fill type + */ +typedef enum CUtensorMapFloatOOBfill_enum { + CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, + CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA +} CUtensorMapFloatOOBfill; + +/** + * GPU Direct v3 tokens + */ +typedef struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { + unsigned long long p2pToken; + unsigned int vaSpaceToken; +} CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1; +typedef CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 CUDA_POINTER_ATTRIBUTE_P2P_TOKENS; + +/** +* Access flags that specify the level of access the current context's device has +* on the memory referenced. +*/ +typedef enum CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 0x0, /**< No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations */ + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = 0x1, /**< Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. 
*/ + CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = 0x3 /**< Read-write access, the device has full read-write access to the memory */ +} CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS; + +/** + * Kernel launch parameters + */ +typedef struct CUDA_LAUNCH_PARAMS_st { + CUfunction function; /**< Kernel to launch */ + unsigned int gridDimX; /**< Width of grid in blocks */ + unsigned int gridDimY; /**< Height of grid in blocks */ + unsigned int gridDimZ; /**< Depth of grid in blocks */ + unsigned int blockDimX; /**< X dimension of each thread block */ + unsigned int blockDimY; /**< Y dimension of each thread block */ + unsigned int blockDimZ; /**< Z dimension of each thread block */ + unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ + CUstream hStream; /**< Stream identifier */ + void **kernelParams; /**< Array of pointers to kernel parameters */ +} CUDA_LAUNCH_PARAMS_v1; +typedef CUDA_LAUNCH_PARAMS_v1 CUDA_LAUNCH_PARAMS; + +/** + * External memory handle types + */ +typedef enum CUexternalMemoryHandleType_enum { + /** + * Handle is an opaque file descriptor + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, + /** + * Handle is an opaque shared NT handle + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + /** + * Handle is a D3D12 heap object + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, + /** + * Handle is a D3D12 committed resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, + /** + * Handle is a shared NT handle to a D3D11 resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, + /** + * Handle is a globally shared handle to a D3D11 resource + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, + /** + * Handle is an NvSciBuf object + */ + CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 +} CUexternalMemoryHandleType; + +/** + * Indicates that the external memory object is a dedicated resource + */ +#define CUDA_EXTERNAL_MEMORY_DEDICATED 0x1 + +/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS + * contains this flag, it indicates that signaling an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC 0x01 + +/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS + * contains this flag, it indicates that waiting on an external semaphore object + * should skip performing appropriate memory synchronization operations over all + * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, + * which otherwise are performed by default to ensure data coherency with other + * importers of the same NvSciBuf memory objects. + */ +#define CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC 0x02 + +/** + * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application needs signaler specific NvSciSyncAttr + * to be filled by ::cuDeviceGetNvSciSyncAttributes. 
+ */ +#define CUDA_NVSCISYNC_ATTR_SIGNAL 0x1 + +/** + * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, + * it indicates that application needs waiter specific NvSciSyncAttr + * to be filled by ::cuDeviceGetNvSciSyncAttributes. + */ +#define CUDA_NVSCISYNC_ATTR_WAIT 0x2 +/** + * External memory handle descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { + /** + * Type of the handle + */ + CUexternalMemoryHandleType type; + union { + /** + * File descriptor referencing the memory object. Valid + * when type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid memory object. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * A handle representing an NvSciBuf Object. Valid when type + * is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF + */ + const void *nvSciBufObject; + } handle; + /** + * Size of the memory allocation + */ + unsigned long long size; + /** + * Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 CUDA_EXTERNAL_MEMORY_HANDLE_DESC; + +/** + * External memory buffer descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + /** + * Offset into the memory object where the buffer's base is + */ + unsigned long long offset; + /** + * Size of the buffer + */ + unsigned long long size; + /** + * Flags reserved for future use. Must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 CUDA_EXTERNAL_MEMORY_BUFFER_DESC; + +/** + * External memory mipmap descriptor + */ +typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + /** + * Offset into the memory object where the base level of the + * mipmap chain is. 
+ */ + unsigned long long offset; + /** + * Format, dimension and type of base level of the mipmap chain + */ + CUDA_ARRAY3D_DESCRIPTOR arrayDesc; + /** + * Total number of levels in the mipmap chain + */ + unsigned int numLevels; + unsigned int reserved[16]; +} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1; +typedef CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; + +/** + * External semaphore handle types + */ +typedef enum CUexternalSemaphoreHandleType_enum { + /** + * Handle is an opaque file descriptor + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, + /** + * Handle is an opaque shared NT handle + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, + /** + * Handle is an opaque, globally shared handle + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + /** + * Handle is a shared NT handle referencing a D3D12 fence object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, + /** + * Handle is a shared NT handle referencing a D3D11 fence object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, + /** + * Opaque handle to NvSciSync Object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, + /** + * Handle is a shared NT handle referencing a D3D11 keyed mutex object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, + /** + * Handle is a globally shared handle referencing a D3D11 keyed mutex object + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, + /** + * Handle is an opaque file descriptor referencing a timeline semaphore + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, + /** + * Handle is an opaque shared NT handle referencing a timeline semaphore + */ + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 +} CUexternalSemaphoreHandleType; + +/** + * External semaphore handle descriptor + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { + /** + * Type of the handle + */ + CUexternalSemaphoreHandleType type; + union { + /** + * File descriptor referencing the semaphore object. Valid + * when type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD + */ + int fd; + /** + * Win32 handle referencing the semaphore object. Valid when + * type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * Exactly one of 'handle' and 'name' must be non-NULL. If + * type is one of the following: + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then 'name' must be NULL. + */ + struct { + /** + * Valid NT handle. Must be NULL if 'name' is non-NULL + */ + void *handle; + /** + * Name of a valid synchronization primitive. + * Must be NULL if 'handle' is non-NULL. + */ + const void *name; + } win32; + /** + * Valid NvSciSyncObj. Must be non NULL + */ + const void* nvSciSyncObj; + } handle; + /** + * Flags reserved for the future. Must be zero. 
+ */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; + +/** + * External semaphore signal parameters + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be signaled + */ + unsigned long long value; + } fence; + union { + /** + * Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType + * is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. + */ + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to release the mutex with + */ + unsigned long long key; + } keyedMutex; + unsigned int reserved[12]; + } params; + /** + * Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to + * signal a ::CUexternalSemaphore of type + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates + * that while signaling the ::CUexternalSemaphore, no memory synchronization + * operations should be performed for any external memory object imported + * as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. + * For all other types of ::CUexternalSemaphore, flags must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS; + +/** + * External semaphore wait parameters + */ +typedef struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { + struct { + /** + * Parameters for fence objects + */ + struct { + /** + * Value of fence to be waited on + */ + unsigned long long value; + } fence; + /** + * Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType + * is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. + */ + union { + void *fence; + unsigned long long reserved; + } nvSciSync; + /** + * Parameters for keyed mutex objects + */ + struct { + /** + * Value of key to acquire the mutex with + */ + unsigned long long key; + /** + * Timeout in milliseconds to wait to acquire the mutex + */ + unsigned int timeoutMs; + } keyedMutex; + unsigned int reserved[10]; + } params; + /** + * Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on + * a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + * which indicates that while waiting for the ::CUexternalSemaphore, no memory + * synchronization operations should be performed for any external memory + * object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. + * For all other types of ::CUexternalSemaphore, flags must be zero. + */ + unsigned int flags; + unsigned int reserved[16]; +} CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1; +typedef CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS; + +/** + * Semaphore signal node parameters + */ +typedef struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { + CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ + const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* paramsArray; /**< Array of external semaphore signal parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. 
*/ +} CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1; +typedef CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 CUDA_EXT_SEM_SIGNAL_NODE_PARAMS; + +/** + * Semaphore wait node parameters + */ +typedef struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { + CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ + const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* paramsArray; /**< Array of external semaphore wait parameters. */ + unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. */ +} CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1; +typedef CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 CUDA_EXT_SEM_WAIT_NODE_PARAMS; + +typedef unsigned long long CUmemGenericAllocationHandle_v1; +typedef CUmemGenericAllocationHandle_v1 CUmemGenericAllocationHandle; + +/** + * Flags for specifying particular handle types + */ +typedef enum CUmemAllocationHandleType_enum { + CU_MEM_HANDLE_TYPE_NONE = 0x0, /**< Does not allow any export mechanism. > */ + CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 0x1, /**< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) */ + CU_MEM_HANDLE_TYPE_WIN32 = 0x2, /**< Allows a Win32 NT handle to be used for exporting. (HANDLE) */ + CU_MEM_HANDLE_TYPE_WIN32_KMT = 0x4, /**< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) */ + CU_MEM_HANDLE_TYPE_MAX = 0x7FFFFFFF +} CUmemAllocationHandleType; + +/** + * Specifies the memory protection flags for mapping. + */ +typedef enum CUmemAccess_flags_enum { + CU_MEM_ACCESS_FLAGS_PROT_NONE = 0x0, /**< Default, make the address range not accessible */ + CU_MEM_ACCESS_FLAGS_PROT_READ = 0x1, /**< Make the address range read accessible */ + CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 0x3, /**< Make the address range read-write accessible */ + CU_MEM_ACCESS_FLAGS_PROT_MAX = 0x7FFFFFFF +} CUmemAccess_flags; + +/** + * Specifies the type of location + */ +typedef enum CUmemLocationType_enum { + CU_MEM_LOCATION_TYPE_INVALID = 0x0, + CU_MEM_LOCATION_TYPE_DEVICE = 0x1, /**< Location is a device location, thus id is a device ordinal */ + CU_MEM_LOCATION_TYPE_MAX = 0x7FFFFFFF +} CUmemLocationType; + +/** +* Defines the allocation types available +*/ +typedef enum CUmemAllocationType_enum { + CU_MEM_ALLOCATION_TYPE_INVALID = 0x0, + + /** This allocation type is 'pinned', i.e. cannot migrate from its current + * location while the application is actively using it + */ + CU_MEM_ALLOCATION_TYPE_PINNED = 0x1, + CU_MEM_ALLOCATION_TYPE_MAX = 0x7FFFFFFF +} CUmemAllocationType; + +/** +* Flag for requesting different optimal and required granularities for an allocation. 
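+*
+* The granularity reported for a given ::CUmemAllocationProp is typically used
+* to round sizes passed to ::cuMemCreate, ::cuMemAddressReserve and ::cuMemMap.
+* A condensed, illustrative sketch (not part of the original header; device
+* ordinal 0 assumed, \p requestedBytes is a placeholder, error checking
+* omitted):
+*
+*   CUmemAllocationProp prop = {0};
+*   prop.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
+*   prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
+*   prop.location.id   = 0;
+*
+*   size_t gran = 0;
+*   cuMemGetAllocationGranularity(&gran, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM);
+*   size_t size = ((requestedBytes + gran - 1) / gran) * gran;   // round up
+*
+*   CUmemGenericAllocationHandle handle;
+*   CUdeviceptr ptr;
+*   cuMemCreate(&handle, size, &prop, 0);
+*   cuMemAddressReserve(&ptr, size, 0, 0, 0);
+*   cuMemMap(ptr, size, 0, handle, 0);
+*
+*   CUmemAccessDesc access = {0};
+*   access.location = prop.location;
+*   access.flags    = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
+*   cuMemSetAccess(ptr, size, &access, 1);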
+*/ +typedef enum CUmemAllocationGranularity_flags_enum { + CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity for allocation */ + CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for allocation for best performance */ +} CUmemAllocationGranularity_flags; + +/** +* Specifies the handle type for address range +*/ +typedef enum CUmemRangeHandleType_enum +{ + CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD = 0x1, + CU_MEM_RANGE_HANDLE_TYPE_MAX = 0x7FFFFFFF +} CUmemRangeHandleType; + +/** + * Sparse subresource types + */ +typedef enum CUarraySparseSubresourceType_enum { + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 +} CUarraySparseSubresourceType; + +/** + * Memory operation types + */ +typedef enum CUmemOperationType_enum { + CU_MEM_OPERATION_TYPE_MAP = 1, + CU_MEM_OPERATION_TYPE_UNMAP = 2 +} CUmemOperationType; + +/** + * Memory handle types + */ +typedef enum CUmemHandleType_enum { + CU_MEM_HANDLE_TYPE_GENERIC = 0 +} CUmemHandleType; + +/** + * Specifies the CUDA array or CUDA mipmapped array memory mapping information + */ +typedef struct CUarrayMapInfo_st { + CUresourcetype resourceType; /**< Resource type */ + + union { + CUmipmappedArray mipmap; + CUarray array; + } resource; + + CUarraySparseSubresourceType subresourceType; /**< Sparse subresource type */ + + union { + struct { + unsigned int level; /**< For CUDA mipmapped arrays must a valid mipmap level. For CUDA arrays must be zero */ + unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ + unsigned int offsetX; /**< Starting X offset in elements */ + unsigned int offsetY; /**< Starting Y offset in elements */ + unsigned int offsetZ; /**< Starting Z offset in elements */ + unsigned int extentWidth; /**< Width in elements */ + unsigned int extentHeight; /**< Height in elements */ + unsigned int extentDepth; /**< Depth in elements */ + } sparseLevel; + struct { + unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ + unsigned long long offset; /**< Offset within mip tail */ + unsigned long long size; /**< Extent in bytes */ + } miptail; + } subresource; + + CUmemOperationType memOperationType; /**< Memory operation type */ + CUmemHandleType memHandleType; /**< Memory handle type */ + + union { + CUmemGenericAllocationHandle memHandle; + } memHandle; + + unsigned long long offset; /**< Offset within the memory */ + unsigned int deviceBitMask; /**< Device ordinal bit mask */ + unsigned int flags; /**< flags for future use, must be zero now. */ + unsigned int reserved[2]; /**< Reserved for future use, must be zero now. */ +} CUarrayMapInfo_v1; +typedef CUarrayMapInfo_v1 CUarrayMapInfo; + +/** + * Specifies a memory location. + */ +typedef struct CUmemLocation_st { + CUmemLocationType type; /**< Specifies the location type, which modifies the meaning of id. */ + int id; /**< identifier for a given this location's ::CUmemLocationType. */ +} CUmemLocation_v1; +typedef CUmemLocation_v1 CUmemLocation; + +/** + * Specifies compression attribute for an allocation. + */ +typedef enum CUmemAllocationCompType_enum { + CU_MEM_ALLOCATION_COMP_NONE = 0x0, /**< Allocating non-compressible memory */ + CU_MEM_ALLOCATION_COMP_GENERIC = 0x1 /**< Allocating compressible memory */ +} CUmemAllocationCompType; + +/** + * This flag if set indicates that the memory will be used as a tile pool. 
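+ *
+ * Illustrative use (not part of the original header; \p prop stands for a
+ * ::CUmemAllocationProp being prepared for ::cuMemCreate to back a sparse
+ * CUDA array or CUDA mipmapped array):
+ *
+ *   prop.allocFlags.usage = CU_MEM_CREATE_USAGE_TILE_POOL;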
+ */ +#define CU_MEM_CREATE_USAGE_TILE_POOL 0x1 + +/** +* Specifies the allocation properties for a allocation. +*/ +typedef struct CUmemAllocationProp_st { + /** Allocation type */ + CUmemAllocationType type; + /** requested ::CUmemAllocationHandleType */ + CUmemAllocationHandleType requestedHandleTypes; + /** Location of allocation */ + CUmemLocation location; + /** + * Windows-specific POBJECT_ATTRIBUTES required when + * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure + * includes security attributes that define + * the scope of which exported allocations may be transferred to other + * processes. In all other cases, this field is required to be zero. + */ + void *win32HandleMetaData; + struct { + /** + * Allocation hint for requesting compressible memory. + * On devices that support Compute Data Compression, compressible + * memory can be used to accelerate accesses to data with unstructured + * sparsity and other compressible data patterns. Applications are + * expected to query allocation property of the handle obtained with + * ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to + * validate if the obtained allocation is compressible or not. Note that + * compressed memory may not be mappable on all devices. + */ + unsigned char compressionType; + unsigned char gpuDirectRDMACapable; + /** Bitmask indicating intended usage for this allocation */ + unsigned short usage; + unsigned char reserved[4]; + } allocFlags; +} CUmemAllocationProp_v1; +typedef CUmemAllocationProp_v1 CUmemAllocationProp; + +/** +* Flags for querying different granularities for a multicast object +*/ +typedef enum CUmulticastGranularity_flags_enum { + CU_MULTICAST_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity */ + CU_MULTICAST_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for best performance */ +} CUmulticastGranularity_flags; + +/** +* Specifies the properties for a multicast object. 
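+*
+* A hedged, illustrative sketch of filling this structure (not part of the
+* original header; assumes a driver recent enough to expose ::cuMulticastCreate,
+* two participating devices, and \p size already rounded to a supported
+* multicast granularity):
+*
+*   CUmulticastObjectProp mcProp = {0};
+*   mcProp.numDevices  = 2;
+*   mcProp.size        = size;
+*   mcProp.handleTypes = CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR;
+*
+*   CUmemGenericAllocationHandle mcHandle;
+*   cuMulticastCreate(&mcHandle, &mcProp);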
+*/ +typedef struct CUmulticastObjectProp_st { + /** + * The number of devices in the multicast team that will bind memory to this + * object + */ + unsigned int numDevices; + /** + * The maximum amount of memory that can be bound to this multicast object + * per device + */ + size_t size; + /** + * Bitmask of exportable handle types (see ::CUmemAllocationHandleType) for + * this object + */ + unsigned long long handleTypes; + /** + * Flags for future use, must be zero now + */ + unsigned long long flags; +} CUmulticastObjectProp_v1; +typedef CUmulticastObjectProp_v1 CUmulticastObjectProp; + +/** + * Memory access descriptor + */ +typedef struct CUmemAccessDesc_st { + CUmemLocation location; /**< Location on which the request is to change it's accessibility */ + CUmemAccess_flags flags; /**< ::CUmemProt accessibility flags to set on the request */ +} CUmemAccessDesc_v1; +typedef CUmemAccessDesc_v1 CUmemAccessDesc; + +/** + * CUDA Graph Update error types + */ +typedef enum CUgraphExecUpdateResult_enum { + CU_GRAPH_EXEC_UPDATE_SUCCESS = 0x0, /**< The update succeeded */ + CU_GRAPH_EXEC_UPDATE_ERROR = 0x1, /**< The update failed for an unexpected reason which is described in the return value of the function */ + CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 0x2, /**< The update failed because the topology changed */ + CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 0x3, /**< The update failed because a node type changed */ + CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 0x4, /**< The update failed because the function of a kernel node changed (CUDA driver < 11.2) */ + CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 0x5, /**< The update failed because the parameters changed in a way that is not supported */ + CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 0x6, /**< The update failed because something about the node is not supported */ + CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = 0x7, /**< The update failed because the function of a kernel node changed in an unsupported way */ + CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED = 0x8 /**< The update failed because the node attributes changed in a way that is not supported */ +} CUgraphExecUpdateResult; + +/** + * Result information returned by cuGraphExecUpdate + */ +typedef struct CUgraphExecUpdateResultInfo_st { + /** + * Gives more specific detail when a cuda graph update fails. + */ + CUgraphExecUpdateResult result; + + /** + * The "to node" of the error edge when the topologies do not match. + * The error node when the error is associated with a specific node. + * NULL when the error is generic. + */ + CUgraphNode errorNode; + + /** + * The from node of error edge when the topologies do not match. Otherwise NULL. + */ + CUgraphNode errorFromNode; +} CUgraphExecUpdateResultInfo_v1; +typedef CUgraphExecUpdateResultInfo_v1 CUgraphExecUpdateResultInfo; + +/** + * CUDA memory pool attributes + */ +typedef enum CUmemPool_attribute_enum { + /** + * (value type = int) + * Allow cuMemAllocAsync to use memory asynchronously freed + * in another streams as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + */ + CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = 1, + + /** + * (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. 
(default enabled) + */ + CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, + + /** + * (value type = int) + * Allow cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by cuFreeAsync (default enabled). + */ + CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, + + /** + * (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + */ + CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, + + /** + * (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool. + */ + CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since the + * last time it was reset. High watermark can only be reset to zero. + */ + CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH, + + /** + * (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + */ + CU_MEMPOOL_ATTR_USED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the application since + * the last time it was reset. High watermark can only be reset to zero. + */ + CU_MEMPOOL_ATTR_USED_MEM_HIGH +} CUmemPool_attribute; + +/** + * Specifies the properties of allocations made from the pool. + */ +typedef struct CUmemPoolProps_st { + CUmemAllocationType allocType; /**< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED */ + CUmemAllocationHandleType handleTypes; /**< Handle types that will be supported by allocations from the pool. */ + CUmemLocation location; /**< Location where allocations should reside. */ + /** + * Windows-specific LPSECURITYATTRIBUTES required when + * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines + * the scope of which exported allocations may be transferred to other + * processes. In all other cases, this field is required to be zero. + */ + void *win32SecurityAttributes; + unsigned char reserved[64]; /**< reserved for future use, must be 0 */ +} CUmemPoolProps_v1; +typedef CUmemPoolProps_v1 CUmemPoolProps; + +/** + * Opaque data for exporting a pool allocation + */ +typedef struct CUmemPoolPtrExportData_st { + unsigned char reserved[64]; +} CUmemPoolPtrExportData_v1; +typedef CUmemPoolPtrExportData_v1 CUmemPoolPtrExportData; + +/** + * Memory allocation node parameters + */ +typedef struct CUDA_MEM_ALLOC_NODE_PARAMS_st { + /** + * in: location where the allocation should reside (specified in ::location). + * ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported. + */ + CUmemPoolProps poolProps; + const CUmemAccessDesc *accessDescs; /**< in: array of memory access descriptors. Used to describe peer GPU access */ + size_t accessDescCount; /**< in: number of memory access descriptors. Must not exceed the number of GPUs. 
*/ + size_t bytesize; /**< in: size in bytes of the requested allocation */ + CUdeviceptr dptr; /**< out: address of the allocation returned by CUDA */ +} CUDA_MEM_ALLOC_NODE_PARAMS; + +typedef enum CUgraphMem_attribute_enum { + /** + * (value type = cuuint64_t) + * Amount of memory, in bytes, currently associated with graphs + */ + CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + */ + CU_GRAPH_MEM_ATTR_USED_MEM_HIGH, + + /** + * (value type = cuuint64_t) + * Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + */ + CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT, + + /** + * (value type = cuuint64_t) + * High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + */ + CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH +} CUgraphMem_attribute; + +/** + * If set, each kernel launched as part of ::cuLaunchCooperativeKernelMultiDevice only + * waits for prior work in the stream corresponding to that GPU to complete before the + * kernel begins execution. + */ +#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC 0x01 + +/** + * If set, any subsequent work pushed in a stream that participated in a call to + * ::cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on + * the GPU corresponding to that stream to complete before it begins execution. + */ +#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC 0x02 + +/** + * If set, the CUDA array is a collection of layers, where each layer is either a 1D + * or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number + * of layers, not the depth of a 3D array. + */ +#define CUDA_ARRAY3D_LAYERED 0x01 + +/** + * Deprecated, use CUDA_ARRAY3D_LAYERED + */ +#define CUDA_ARRAY3D_2DARRAY 0x01 + +/** + * This flag must be set in order to bind a surface reference + * to the CUDA array + */ +#define CUDA_ARRAY3D_SURFACE_LDST 0x02 + +/** + * If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The + * width of such a CUDA array must be equal to its height, and Depth must be six. + * If ::CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps + * and Depth must be a multiple of six. + */ +#define CUDA_ARRAY3D_CUBEMAP 0x04 + +/** + * This flag must be set in order to perform texture gather operations + * on a CUDA array. + */ +#define CUDA_ARRAY3D_TEXTURE_GATHER 0x08 + +/** + * This flag if set indicates that the CUDA + * array is a DEPTH_TEXTURE. + */ +#define CUDA_ARRAY3D_DEPTH_TEXTURE 0x10 + +/** + * This flag indicates that the CUDA array may be bound as a color target + * in an external graphics API + */ +#define CUDA_ARRAY3D_COLOR_ATTACHMENT 0x20 + +/** + * This flag if set indicates that the CUDA array or CUDA mipmapped array + * is a sparse CUDA array or CUDA mipmapped array respectively + */ +#define CUDA_ARRAY3D_SPARSE 0x40 + +/** + * This flag if set indicates that the CUDA array or CUDA mipmapped array + * will allow deferred memory mapping + */ +#define CUDA_ARRAY3D_DEFERRED_MAPPING 0x80 + +/** + * Override the texref format with a format inferred from the array. + * Flag for ::cuTexRefSetArray() + */ +#define CU_TRSA_OVERRIDE_FORMAT 0x01 + +/** + * Read the texture as integers rather than promoting the values to floats + * in the range [0,1]. 
+ * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_READ_AS_INTEGER 0x01 + +/** + * Use normalized texture coordinates in the range [0,1) instead of [0,dim). + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_NORMALIZED_COORDINATES 0x02 + +/** + * Perform sRGB->linear conversion during texture read. + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_SRGB 0x10 + + /** + * Disable any trilinear filtering optimizations. + * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() + */ +#define CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION 0x20 + +/** + * Enable seamless cube map filtering. + * Flag for ::cuTexObjectCreate() + */ +#define CU_TRSF_SEAMLESS_CUBEMAP 0x40 + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_END + */ +#define CU_LAUNCH_PARAM_END_AS_INT 0x00 + +/** + * End of array terminator for the \p extra parameter to + * ::cuLaunchKernel + */ +#define CU_LAUNCH_PARAM_END ((void*)CU_LAUNCH_PARAM_END_AS_INT) + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_POINTER + */ +#define CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT 0x01 + +/** + * Indicator that the next value in the \p extra parameter to + * ::cuLaunchKernel will be a pointer to a buffer containing all kernel + * parameters used for launching kernel \p f. This buffer needs to + * honor all alignment/padding requirements of the individual parameters. + * If ::CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the + * \p extra array, then ::CU_LAUNCH_PARAM_BUFFER_POINTER will have no + * effect. + */ +#define CU_LAUNCH_PARAM_BUFFER_POINTER ((void*)CU_LAUNCH_PARAM_BUFFER_POINTER_AS_INT) + +/** + * C++ compile time constant for CU_LAUNCH_PARAM_BUFFER_SIZE + */ +#define CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT 0x02 + +/** + * Indicator that the next value in the \p extra parameter to + * ::cuLaunchKernel will be a pointer to a size_t which contains the + * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER. + * It is required that ::CU_LAUNCH_PARAM_BUFFER_POINTER also be specified + * in the \p extra array if the value associated with + * ::CU_LAUNCH_PARAM_BUFFER_SIZE is not zero. + */ +#define CU_LAUNCH_PARAM_BUFFER_SIZE ((void*)CU_LAUNCH_PARAM_BUFFER_SIZE_AS_INT) + +/** + * For texture references loaded into the module, use default texunit from + * texture reference. + */ +#define CU_PARAM_TR_DEFAULT -1 + +/** + * Device that represents the CPU + */ +#define CU_DEVICE_CPU ((CUdevice)-1) + +/** + * Device that represents an invalid device + */ +#define CU_DEVICE_INVALID ((CUdevice)-2) + +/** + * Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS + */ +typedef enum CUflushGPUDirectRDMAWritesOptions_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = 1<<0, /**< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device. */ + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = 1<<1 /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. */ +} CUflushGPUDirectRDMAWritesOptions; + +/** + * Platform native ordering for GPUDirect RDMA writes + */ +typedef enum CUGPUDirectRDMAWritesOrdering_enum { + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = 0, /**< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported. 
*/ + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = 100, /**< Natively, the device can consistently consume remote writes, although other CUDA devices may not. */ + CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = 200 /**< Any CUDA device in the system can consistently consume remote writes to this device. */ +} CUGPUDirectRDMAWritesOrdering; + +/** + * The scopes for ::cuFlushGPUDirectRDMAWrites + */ +typedef enum CUflushGPUDirectRDMAWritesScope_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100, /**< Blocks until remote writes are visible to the CUDA device context owning the data. */ + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200 /**< Blocks until remote writes are visible to all CUDA device contexts. */ +} CUflushGPUDirectRDMAWritesScope; + +/** + * The targets for ::cuFlushGPUDirectRDMAWrites + */ +typedef enum CUflushGPUDirectRDMAWritesTarget_enum { + CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0 /**< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context. */ +} CUflushGPUDirectRDMAWritesTarget; + +/** + * The additional write options for ::cuGraphDebugDotPrint + */ +typedef enum CUgraphDebugDot_flags_enum { + CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = 1<<0, /**< Output all debug data as if every debug flag is enabled */ + CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = 1<<1, /**< Use CUDA Runtime structures for output */ + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = 1<<2, /**< Adds CUDA_KERNEL_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = 1<<3, /**< Adds CUDA_MEMCPY3D values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = 1<<4, /**< Adds CUDA_MEMSET_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = 1<<5, /**< Adds CUDA_HOST_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = 1<<6, /**< Adds CUevent handle from record and wait nodes to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = 1<<7, /**< Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = 1<<8, /**< Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = 1<<9, /**< Adds CUkernelNodeAttrValue values to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = 1<<10, /**< Adds node handles and every kernel function handle to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = 1<<11, /**< Adds memory alloc node parameters to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = 1<<12, /**< Adds memory free node parameters to output */ + CU_GRAPH_DEBUG_DOT_FLAGS_BATCH_MEM_OP_NODE_PARAMS = 1<<13 /**< Adds batch mem op node parameters to output */ + , CU_GRAPH_DEBUG_DOT_FLAGS_EXTRA_TOPO_INFO = 1<<14 /**< Adds edge numbering information */ +} CUgraphDebugDot_flags; + +/** + * Flags for user objects for graphs + */ +typedef enum CUuserObject_flags_enum { + CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = 1 /**< Indicates the destructor execution is not synchronized by any CUDA handle. */ +} CUuserObject_flags; + +/** + * Flags for retaining user object references for graphs + */ +typedef enum CUuserObjectRetain_flags_enum { + CU_GRAPH_USER_OBJECT_MOVE = 1 /**< Transfer references from the caller rather than creating new references. 
*/ +} CUuserObjectRetain_flags; + +/** + * Flags for instantiating a graph + */ +typedef enum CUgraphInstantiate_flags_enum { + CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = 1 /**< Automatically free memory allocated in a graph before relaunching. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD = 2 /**< Automatically upload the graph after instantiaton. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH = 4 /**< Instantiate the graph to be launchable from the device. */ + , CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY = 8 /**< Run the graph using the per-node priority attributes rather than the + priority of the stream it is launched into. */ +} CUgraphInstantiate_flags; + +/** @} */ /* END CUDA_TYPES */ + +#if defined(__GNUC__) + #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) + #pragma GCC visibility push(default) + #endif +#endif + +#ifdef _WIN32 +#define CUDAAPI __stdcall +#else +#define CUDAAPI +#endif + +/** + * \defgroup CUDA_ERROR Error Handling + * + * ___MANBRIEF___ error handling functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the error handling functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Gets the string description of an error code + * + * Sets \p *pStr to the address of a NULL-terminated string description + * of the error code \p error. + * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + * will be returned and \p *pStr will be set to the NULL address. + * + * \param error - Error code to convert to string + * \param pStr - Address of the string pointer. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::CUresult, + * ::cudaGetErrorString + */ +CUresult CUDAAPI cuGetErrorString(CUresult error, const char **pStr); + +/** + * \brief Gets the string representation of an error code enum name + * + * Sets \p *pStr to the address of a NULL-terminated string representation + * of the name of the enum error code \p error. + * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE + * will be returned and \p *pStr will be set to the NULL address. + * + * \param error - Error code to convert to string + * \param pStr - Address of the string pointer. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::CUresult, + * ::cudaGetErrorName + */ +CUresult CUDAAPI cuGetErrorName(CUresult error, const char **pStr); + +/** @} */ /* END CUDA_ERROR */ + +/** + * \defgroup CUDA_INITIALIZE Initialization + * + * ___MANBRIEF___ initialization functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the initialization functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Initialize the CUDA driver API + * Initializes the driver API and must be called before any other function from + * the driver API in the current process. Currently, the \p Flags parameter must be 0. If ::cuInit() + * has not been called, any function from the driver API will return + * ::CUDA_ERROR_NOT_INITIALIZED. + * + * \param Flags - Initialization flag for CUDA. 
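+ *
+ * A minimal, illustrative bootstrap sketch (not part of the original header;
+ * error handling abbreviated):
+ *
+ *   CUresult rc = cuInit(0);
+ *   if (rc != CUDA_SUCCESS) {
+ *       const char *msg = NULL;
+ *       cuGetErrorString(rc, &msg);
+ *       // report msg and abort
+ *   }
+ *   int deviceCount = 0;
+ *   cuDeviceGetCount(&deviceCount);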
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, + * ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE + * \notefnerr + */ +CUresult CUDAAPI cuInit(unsigned int Flags); + +/** @} */ /* END CUDA_INITIALIZE */ + +/** + * \defgroup CUDA_VERSION Version Management + * + * ___MANBRIEF___ version management functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the version management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns the latest CUDA version supported by driver + * + * Returns in \p *driverVersion the version of CUDA supported by + * the driver. The version is returned as + * (1000 × major + 10 × minor). For example, CUDA 9.2 + * would be represented by 9020. + * + * This function automatically returns ::CUDA_ERROR_INVALID_VALUE if + * \p driverVersion is NULL. + * + * \param driverVersion - Returns the CUDA driver version + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cudaDriverGetVersion, + * ::cudaRuntimeGetVersion + */ +CUresult CUDAAPI cuDriverGetVersion(int *driverVersion); + +/** @} */ /* END CUDA_VERSION */ + +/** + * \defgroup CUDA_DEVICE Device Management + * + * ___MANBRIEF___ device management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the device management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns a handle to a compute device + * + * Returns in \p *device a device handle given an ordinal in the range [0, + * ::cuDeviceGetCount()-1]. + * + * \param device - Returned device handle + * \param ordinal - Device number to get handle for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport + */ +CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal); + +/** + * \brief Returns the number of compute-capable devices + * + * Returns in \p *count the number of devices with compute capability greater + * than or equal to 2.0 that are available for execution. If there is no such + * device, ::cuDeviceGetCount() returns 0. + * + * \param count - Returned number of compute-capable devices + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceCount + */ +CUresult CUDAAPI cuDeviceGetCount(int *count); + +/** + * \brief Returns an identifier string for the device + * + * Returns an ASCII string identifying the device \p dev in the NULL-terminated + * string pointed to by \p name. \p len specifies the maximum length of the + * string that may be returned. 
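+ *
+ * A small sketch of a typical enumeration loop (assumes ::cuInit() has already
+ * succeeded; error handling is abbreviated and the buffer size is an arbitrary
+ * choice):
+ *
+ * \code
+     int count = 0;
+     cuDeviceGetCount(&count);
+     for (int ordinal = 0; ordinal < count; ++ordinal) {
+         CUdevice dev;
+         char name[256];
+         cuDeviceGet(&dev, ordinal);
+         cuDeviceGetName(name, (int)sizeof(name), dev);
+         printf("Device %d: %s\n", ordinal, name);
+     }
+ * \endcode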
+ * + * \param name - Returned identifier string for the device + * \param len - Maximum length of string to store in \p name + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetUuid, + * ::cuDeviceGetLuid, + * ::cuDeviceGetCount, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetName(char *name, int len, CUdevice dev); + +/** + * \brief Return an UUID for the device + * + * Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will + * supplant this version in 12.0, which is retained for minor version compatibility. + * + * Returns 16-octets identifying the device \p dev in the structure + * pointed by the \p uuid. + * + * \param uuid - Returned UUID + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetUuid_v2 + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetUuid(CUuuid *uuid, CUdevice dev); + +/** + * \brief Return an UUID for the device (11.4+) + * + * Returns 16-octets identifying the device \p dev in the structure + * pointed by the \p uuid. If the device is in MIG mode, returns its + * MIG UUID which uniquely identifies the subscribed MIG compute instance. + * + * \param uuid - Returned UUID + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetLuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetUuid_v2(CUuuid *uuid, CUdevice dev); + +/** + * \brief Return an LUID and device node mask for the device + * + * Return identifying information (\p luid and \p deviceNodeMask) to allow + * matching device with graphics APIs. + * + * \param luid - Returned LUID + * \param deviceNodeMask - Returned device node mask + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetLuid(char *luid, unsigned int *deviceNodeMask, CUdevice dev); + +/** + * \brief Returns the total amount of memory on the device + * + * Returns in \p *bytes the total amount of memory available on the device + * \p dev in bytes. 
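+ *
+ * For illustration, querying the total memory of the first device might look
+ * like this sketch (device ordinal 0 is an arbitrary choice; error handling
+ * abbreviated):
+ *
+ * \code
+     CUdevice dev;
+     size_t totalBytes = 0;
+     cuDeviceGet(&dev, 0);
+     cuDeviceTotalMem(&totalBytes, dev);
+     printf("Total device memory: %zu MiB\n", totalBytes / (1024 * 1024));
+ * \endcode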
+ * + * \param bytes - Returned memory available on device in bytes + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaMemGetInfo + */ +CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev); + +/** + * \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size. + * + * Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture + * for given \p format and \p numChannels. + * + * \param maxWidthInElements - Returned maximum number of texture elements allocatable for given \p format and \p numChannels. + * \param format - Texture format. + * \param numChannels - Number of channels per texture element. + * \param dev - Device handle. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetAttribute, + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cudaMemGetInfo, + * ::cuDeviceTotalMem + */ +CUresult CUDAAPI cuDeviceGetTexture1DLinearMaxWidth(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice dev); + +/** + * \brief Returns information about the device + * + * Returns in \p *pi the integer value of the attribute \p attrib on device + * \p dev. 
The supported attributes are: + * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per + * block; + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of + * shared memory available to a thread block in bytes + * - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for + * __constant__ variables in a CUDA C kernel in bytes + * - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads + * - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the + * memory copy functions that involve memory regions allocated through + * ::cuMemAllocPitch() + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width + * for a 1D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum + * mipmapped 1D texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D + * texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width + * for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height + * for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch + * in bytes for a 2D texture bound to linear memory + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum + * mipmapped 2D texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum + * mipmapped 2D texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D + * texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D + * texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D + * texture depth + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: + * Alternate maximum 3D texture width, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: + * Alternate maximum 3D texture height, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: + * Alternate maximum 3D texture depth, 0 if no alternate + * maximum 3D texture size is supported + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: + * Maximum cubemap texture width or height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: + * Maximum 1D layered texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: + * Maximum layers in a 1D layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: + * Maximum 2D layered texture width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: + * Maximum 2D layered texture height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: + * Maximum layers in a 2D layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: + * Maximum cubemap layered 
texture width or height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: + * Maximum layers in a cubemap layered texture + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: + * Maximum 1D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: + * Maximum 2D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: + * Maximum 2D surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: + * Maximum 3D surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: + * Maximum 3D surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: + * Maximum 3D surface depth + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: + * Maximum 1D layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: + * Maximum layers in a 1D layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: + * Maximum 2D layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: + * Maximum 2D layered surface height + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: + * Maximum layers in a 2D layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: + * Maximum cubemap surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: + * Maximum cubemap layered surface width + * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: + * Maximum layers in a cubemap layered surface + * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit + * registers available to a thread block + * - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz + * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture + * base addresses aligned to ::textureAlign bytes do not need an offset + * applied to texture fetches + * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement + * for 2D texture references bound to pitched memory + * - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy + * memory between host and device while executing a kernel, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on + * the device + * - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit + * for kernels executed on the device, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the + * memory subsystem, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host + * memory into the CUDA address space, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that device is currently + * in. Available modes are as follows: + * - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and + * can have multiple CUDA contexts present at a single time. + * - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is + * prohibited from creating new CUDA contexts. + * - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device + * can have only one context used by a single process at a time. + * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports + * executing multiple kernels within the same context simultaneously, or 0 if + * not. It is not guaranteed that multiple kernels will be resident + * on the device concurrently so this feature should not be relied upon for + * correctness. 
+ * - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the + * device, 0 if error correction is disabled or not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier + * of the device + * - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device + * - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC + * is only available on Tesla hardware running Windows Vista or later + * - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz + * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits + * - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 0 if the device doesn't have L2 cache + * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with + * the host, or 0 if not + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number + * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals + * in L1 cache, 0 if caching globals in L1 cache is not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals + * in L1 cache, 0 if caching locals in L1 cache is not supported by the device + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of + * shared memory available to a multiprocessor in bytes; this amount is shared + * by all thread blocks simultaneously resident on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit + * registers available to a multiprocessor; this number is shared by all thread + * blocks simultaneously resident on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory + * on this system, 0 if allocating managed memory is not supported by the device on this system. + * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not. + * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices + * associated with the same board. Devices on the same multi-GPU board will share the same identifier. + * - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if Link between the device and the host + * supports native atomic operations. + * - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance + * (in floating-point operations per second) to double precision performance. + * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device supports coherently accessing + * pageable memory without calling cudaHostRegister on it. + * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory + * concurrently with the CPU. + * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption. + * - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered + * memory at the same virtual address as the CPU. + * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size + * supported on this device. 
This is the maximum value that can be opted into when using the cuFuncSetAttribute() or cuKernelSetAttribute() call. + * For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES + * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's + * page tables. + * - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration. + * - ::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: Maximum number of thread blocks that can reside on a multiprocessor + * - ::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: Device supports compressible memory allocation via ::cuMemCreate + * - ::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: Maximum L2 persisting lines capacity setting in bytes + * - ::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: Maximum value of CUaccessPolicyWindow::num_bytes + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED: Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate. + * - ::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: Amount of shared memory per block reserved by CUDA driver in bytes + * - ::CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED: Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays. + * - ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU + * - ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED: Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS: The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum + * - ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING: GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. + * - ::CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES: Bitmask of handle types supported with mempool based IPC + * - ::CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED: Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays. 
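+ *
+ * As a brief usage sketch (error handling abbreviated; \c dev is assumed to have
+ * been obtained earlier with ::cuDeviceGet()), attributes are queried one at a
+ * time:
+ *
+ * \code
+     int major = 0, minor = 0, smCount = 0;
+     cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, dev);
+     cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, dev);
+     cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
+     printf("Compute capability %d.%d, %d multiprocessors\n", major, minor, smCount);
+ * \endcode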
+ * + * \param pi - Returned device attribute value + * \param attrib - Device attribute to query + * \param dev - Device handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGetCount, + * ::cuDeviceGetName, + * ::cuDeviceGetUuid, + * ::cuDeviceGet, + * ::cuDeviceTotalMem, + * ::cuDeviceGetExecAffinitySupport, + * ::cudaDeviceGetAttribute, + * ::cudaGetDeviceProperties + */ +CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); + +/** + * \brief Return NvSciSync attributes that this device can support. + * + * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that + * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList + * can be used to create an NvSciSync object that matches this device's capabilities. + * + * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is + * already set this API will return ::CUDA_ERROR_INVALID_VALUE. + * + * The applications should set \p nvSciSyncAttrList to a valid + * NvSciSyncAttrList failing which this API will return + * ::CUDA_ERROR_INVALID_HANDLE. + * + * The \p flags controls how applications intends to use + * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: + * - ::CUDA_NVSCISYNC_ATTR_SIGNAL, specifies that the applications intends to + * signal an NvSciSync on this CUDA device. + * - ::CUDA_NVSCISYNC_ATTR_WAIT, specifies that the applications intends to + * wait on an NvSciSync on this CUDA device. + * + * At least one of these flags must be set, failing which the API + * returns ::CUDA_ERROR_INVALID_VALUE. Both the flags are orthogonal + * to one another: a developer may set both these flags that allows to + * set both wait and signal specific attributes in the same \p nvSciSyncAttrList. + * + * Note that this API updates the input \p nvSciSyncAttrList with values equivalent + * to the following public attribute key-values: + * NvSciSyncAttrKey_RequiredPerm is set to + * - NvSciSyncAccessPerm_SignalOnly if ::CUDA_NVSCISYNC_ATTR_SIGNAL is set in \p flags. + * - NvSciSyncAccessPerm_WaitOnly if ::CUDA_NVSCISYNC_ATTR_WAIT is set in \p flags. + * - NvSciSyncAccessPerm_WaitSignal if both ::CUDA_NVSCISYNC_ATTR_WAIT and + * ::CUDA_NVSCISYNC_ATTR_SIGNAL are set in \p flags. + * NvSciSyncAttrKey_PrimitiveInfo is set to + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid \p device. + * - NvSciSyncAttrValPrimitiveType_Syncpoint if \p device is a Tegra device. + * - NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if \p device is GA10X+. + * NvSciSyncAttrKey_GpuId is set to the same UUID that is returned for this + * \p device from ::cuDeviceGetUuid. + * + * \param nvSciSyncAttrList - Return NvSciSync attributes supported. + * \param dev - Valid Cuda Device to get NvSciSync attributes for. + * \param flags - flags describing NvSciSync usage. 
+ * + * \return + * + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa + * ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, CUdevice dev, int flags); + +/** + * \brief Sets the current memory pool of a device + * + * The memory pool must be local to the specified device. + * ::cuMemAllocAsync allocates from the current mempool of the provided stream's device. + * By default, a device's current memory pool is its default memory pool. + * + * \note Use ::cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different + * than the one the stream runs on. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync + */ +CUresult CUDAAPI cuDeviceSetMemPool(CUdevice dev, CUmemoryPool pool); + +/** + * \brief Gets the current mempool for a device + * + * Returns the last pool provided to ::cuDeviceSetMemPool for this device + * or the device's default memory pool if ::cuDeviceSetMemPool has never been called. + * By default the current mempool is the default mempool for a device. + * Otherwise the returned pool must have been set with ::cuDeviceSetMemPool. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool + */ +CUresult CUDAAPI cuDeviceGetMemPool(CUmemoryPool *pool, CUdevice dev); + +/** + * \brief Returns the default mempool of a device + * + * The default mempool of a device contains device memory from that device. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuDeviceGetDefaultMemPool(CUmemoryPool *pool_out, CUdevice dev); + +/** + * \brief Returns information about the execution affinity support of the device. + * + * Returns in \p *pi whether execution affinity type \p type is supported by device \p dev. 
+ * The supported types are:
+ * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: 1 if context with limited SMs is supported by the device,
+ *   or 0 if not;
+ *
+ * \param pi - 1 if the execution affinity type \p type is supported by the device, or 0 if not
+ * \param type - Execution affinity type to query
+ * \param dev - Device handle
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE
+ * \notefnerr
+ *
+ * \sa
+ * ::cuDeviceGetAttribute,
+ * ::cuDeviceGetCount,
+ * ::cuDeviceGetName,
+ * ::cuDeviceGetUuid,
+ * ::cuDeviceGet,
+ * ::cuDeviceTotalMem
+ */
+CUresult CUDAAPI cuDeviceGetExecAffinitySupport(int *pi, CUexecAffinityType type, CUdevice dev);
+
+/**
+ * \brief Blocks until remote writes are visible to the specified scope
+ *
+ * Blocks until GPUDirect RDMA writes to the target context via mappings
+ * created through APIs like nvidia_p2p_get_pages (see
+ * https://docs.nvidia.com/cuda/gpudirect-rdma for more information) are
+ * visible to the specified scope.
+ *
+ * If the scope equals or lies within the scope indicated by
+ * ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call
+ * will be a no-op and can be safely omitted for performance. This can be
+ * determined by comparing the numerical values between the two enums, with
+ * smaller scopes having smaller values.
+ *
+ * Users may query support for this API via
+ * ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS.
+ *
+ * \param target - The target of the operation, see ::CUflushGPUDirectRDMAWritesTarget
+ * \param scope - The scope of the operation, see ::CUflushGPUDirectRDMAWritesScope
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ *
+ */
+CUresult CUDAAPI cuFlushGPUDirectRDMAWrites(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope);
+
+/** @} */ /* END CUDA_DEVICE */
+
+/**
+ * \defgroup CUDA_DEVICE_DEPRECATED Device Management [DEPRECATED]
+ *
+ * ___MANBRIEF___ deprecated device management functions of the low-level CUDA
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the deprecated device management functions of the low-level
+ * CUDA driver application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * \brief Returns properties for a selected device
+ *
+ * \deprecated
+ *
+ * This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute().
+ *
+ * Returns in \p *prop the properties of device \p dev.
The ::CUdevprop
+ * structure is defined as:
+ *
+ * \code
+     typedef struct CUdevprop_st {
+         int maxThreadsPerBlock;
+         int maxThreadsDim[3];
+         int maxGridSize[3];
+         int sharedMemPerBlock;
+         int totalConstantMemory;
+         int SIMDWidth;
+         int memPitch;
+         int regsPerBlock;
+         int clockRate;
+         int textureAlign;
+     } CUdevprop;
+ * \endcode
+ * where:
+ *
+ * - ::maxThreadsPerBlock is the maximum number of threads per block;
+ * - ::maxThreadsDim[3] is the maximum size of each dimension of a block;
+ * - ::maxGridSize[3] is the maximum size of each dimension of a grid;
+ * - ::sharedMemPerBlock is the total amount of shared memory available per
+ *   block in bytes;
+ * - ::totalConstantMemory is the total amount of constant memory available on
+ *   the device in bytes;
+ * - ::SIMDWidth is the warp size;
+ * - ::memPitch is the maximum pitch allowed by the memory copy functions that
+ *   involve memory regions allocated through ::cuMemAllocPitch();
+ * - ::regsPerBlock is the total number of registers available per block;
+ * - ::clockRate is the clock frequency in kilohertz;
+ * - ::textureAlign is the alignment requirement; texture base addresses that
+ *   are aligned to ::textureAlign bytes do not need an offset applied to
+ *   texture fetches.
+ *
+ * \param prop - Returned properties of device
+ * \param dev - Device to get properties for
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE
+ * \notefnerr
+ *
+ * \sa
+ * ::cuDeviceGetAttribute,
+ * ::cuDeviceGetCount,
+ * ::cuDeviceGetName,
+ * ::cuDeviceGetUuid,
+ * ::cuDeviceGet,
+ * ::cuDeviceTotalMem
+ */
+__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceGetProperties(CUdevprop *prop, CUdevice dev);
+
+/**
+ * \brief Returns the compute capability of the device
+ *
+ * \deprecated
+ *
+ * This function was deprecated as of CUDA 5.0 and its functionality superseded
+ * by ::cuDeviceGetAttribute().
+ *
+ * Returns in \p *major and \p *minor the major and minor revision numbers that
+ * define the compute capability of the device \p dev.
+ *
+ * \param major - Major revision number
+ * \param minor - Minor revision number
+ * \param dev - Device handle
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE
+ * \notefnerr
+ *
+ * \sa
+ * ::cuDeviceGetAttribute,
+ * ::cuDeviceGetCount,
+ * ::cuDeviceGetName,
+ * ::cuDeviceGetUuid,
+ * ::cuDeviceGet,
+ * ::cuDeviceTotalMem
+ */
+__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev);
+
+/** @} */ /* END CUDA_DEVICE_DEPRECATED */
+
+/**
+ * \defgroup CUDA_PRIMARY_CTX Primary Context Management
+ *
+ * ___MANBRIEF___ primary context management functions of the low-level CUDA driver
+ * API (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the primary context management functions of the low-level
+ * CUDA driver application programming interface.
+ *
+ * The primary context is unique per device and shared with the CUDA runtime API.
+ * These functions allow integration with other libraries using CUDA.
+ *
+ * @{
+ */
+
+/**
+ * \brief Retain the primary context on the GPU
+ *
+ * Retains the primary context on the device.
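+ *
+ * A typical retain/use/release pattern, sketched without error handling
+ * (\c dev is assumed to come from ::cuDeviceGet(); the middle step is only a
+ * placeholder comment):
+ *
+ * \code
+     CUcontext primary = NULL;
+     cuDevicePrimaryCtxRetain(&primary, dev);
+     cuCtxSetCurrent(primary);              // make the primary context current to this thread
+     // ... load modules, allocate memory, launch work ...
+     cuDevicePrimaryCtxRelease(dev);        // balance the retain when done
+ * \endcode
+ *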
+ * Once the user successfully retains the primary context, the primary context
+ * will be active and available to the user until the user releases it
+ * with ::cuDevicePrimaryCtxRelease() or resets it with ::cuDevicePrimaryCtxReset().
+ * Unlike ::cuCtxCreate(), the newly retained context is not pushed onto the stack.
+ *
+ * Retaining the primary context for the first time will fail with ::CUDA_ERROR_UNKNOWN
+ * if the compute mode of the device is ::CU_COMPUTEMODE_PROHIBITED. The function
+ * ::cuDeviceGetAttribute() can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to
+ * determine the compute mode of the device.
+ * The nvidia-smi tool can be used to set the compute mode for
+ * devices. Documentation for nvidia-smi can be obtained by passing a
+ * -h option to it.
+ *
+ * Please note that the primary context always supports pinned allocations. Other
+ * flags can be specified by ::cuDevicePrimaryCtxSetFlags().
+ *
+ * \param pctx - Returned context handle of the new context
+ * \param dev - Device for which primary context is requested
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa ::cuDevicePrimaryCtxRelease,
+ * ::cuDevicePrimaryCtxSetFlags,
+ * ::cuCtxCreate,
+ * ::cuCtxGetApiVersion,
+ * ::cuCtxGetCacheConfig,
+ * ::cuCtxGetDevice,
+ * ::cuCtxGetFlags,
+ * ::cuCtxGetLimit,
+ * ::cuCtxPopCurrent,
+ * ::cuCtxPushCurrent,
+ * ::cuCtxSetCacheConfig,
+ * ::cuCtxSetLimit,
+ * ::cuCtxSynchronize
+ */
+CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev);
+
+/**
+ * \brief Release the primary context on the GPU
+ *
+ * Releases the primary context on the device.
+ * A retained context should always be released once the user is done using
+ * it. The context is automatically reset once the last reference to it is
+ * released. This behavior is different when the primary context was retained
+ * by the CUDA runtime from CUDA 4.0 and earlier. In this case, the primary
+ * context always remains active.
+ *
+ * Releasing a primary context that has not been previously retained will
+ * fail with ::CUDA_ERROR_INVALID_CONTEXT.
+ *
+ * Please note that unlike ::cuCtxDestroy() this method does not pop the context
+ * from the stack under any circumstances.
+ *
+ * \param dev - Device whose primary context is released
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_INVALID_CONTEXT
+ * \notefnerr
+ *
+ * \sa ::cuDevicePrimaryCtxRetain,
+ * ::cuCtxDestroy,
+ * ::cuCtxGetApiVersion,
+ * ::cuCtxGetCacheConfig,
+ * ::cuCtxGetDevice,
+ * ::cuCtxGetFlags,
+ * ::cuCtxGetLimit,
+ * ::cuCtxPopCurrent,
+ * ::cuCtxPushCurrent,
+ * ::cuCtxSetCacheConfig,
+ * ::cuCtxSetLimit,
+ * ::cuCtxSynchronize
+ */
+CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev);
+
+/**
+ * \brief Set flags for the primary context
+ *
+ * Sets the flags for the primary context on the device, overwriting previously
+ * set ones.
+ *
+ * The three LSBs of the \p flags parameter can be used to control how the OS
+ * thread, which owns the CUDA context at the time of an API call, interacts
+ * with the OS scheduler when waiting for results from the GPU. Only one of
+ * the scheduling flags can be set when creating a context.
+ * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial settings will be taken from the global settings at the time of + * context creation. The other settings that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial settings will be taken from the global settings at the time of + * context creation. The other settings that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * \param dev - Device for which the primary context flags are set + * \param flags - New flags for the device + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRetain, + * ::cuDevicePrimaryCtxGetState, + * ::cuCtxCreate, + * ::cuCtxGetFlags, + * ::cuCtxSetFlags, + * ::cudaSetDeviceFlags + */ +CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); + +/** + * \brief Get the state of the primary context + * + * Returns in \p *flags the flags for the primary context of \p dev, and in + * \p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag + * values. + * + * \param dev - Device to get primary context flags for + * \param flags - Pointer to store flags + * \param active - Pointer to store context state; 0 = inactive, 1 = active + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa + * ::cuDevicePrimaryCtxSetFlags, + * ::cuCtxGetFlags, + * ::cuCtxSetFlags, + * ::cudaGetDeviceFlags + */ +CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active); + +/** + * \brief Destroy all allocations and reset all state on the primary context + * + * Explicitly destroys and cleans up all resources associated with the current + * device in the current process. + * + * Note that it is responsibility of the calling function to ensure that no + * other module in the process is using the device any more. For that reason + * it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases. 
+ * However it is safe for other modules to call ::cuDevicePrimaryCtxRelease() + * even after resetting the device. + * Resetting the primary context does not release it, an application that has + * retained the primary context should explicitly release its usage. + * + * \param dev - Device for which primary context is destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE + * \notefnerr + * + * \sa ::cuDevicePrimaryCtxRetain, + * ::cuDevicePrimaryCtxRelease, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceReset + */ +CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); + +/** @} */ /* END CUDA_PRIMARY_CTX */ + +/** + * \defgroup CUDA_CTX Context Management + * + * ___MANBRIEF___ context management functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the context management functions of the low-level + * CUDA driver application programming interface. + * + * Please note that some functions are described in + * \ref CUDA_PRIMARY_CTX "Primary Context Management" section. + * + * @{ + */ + +/** + * \brief Create a CUDA context + * + * \note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain. + * + * Creates a new CUDA context and associates it with the calling thread. The + * \p flags parameter is described below. The context is created with a usage + * count of 1 and the caller of ::cuCtxCreate() must call ::cuCtxDestroy() + * when done using the context. If a context is already current to the thread, + * it is supplanted by the newly created context and may be restored by a subsequent + * call to ::cuCtxPopCurrent(). + * + * The three LSBs of the \p flags parameter can be used to control how the OS + * thread, which owns the CUDA context at the time of an API call, interacts + * with the OS scheduler when waiting for results from the GPU. Only one of + * the scheduling flags can be set when creating a context. + * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + * This flag must be set in order to allocate pinned host memory that is + * accessible to the GPU. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * Setting this flag on any context creation is equivalent to setting the + * ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + * + * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + * compute mode of the device. The nvidia-smi tool can be used to set + * the compute mode for * devices. + * Documentation for nvidia-smi can be obtained by passing a + * -h option to it. + * + * \param pctx - Returned context handle of the new context + * \param flags - Context creation flags + * \param dev - Device to create context on + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCoredumpSetAttributeGlobal, + * ::cuCoredumpSetAttribute, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); + +/** + * \brief Create a CUDA context with execution affinity + * + * Creates a new CUDA context with execution affinity and associates it with + * the calling thread. The \p paramsArray and \p flags parameter are described below. 
+ * The context is created with a usage count of 1 and the caller of ::cuCtxCreate() must + * call ::cuCtxDestroy() when done using the context. If a context is already + * current to the thread, it is supplanted by the newly created context and may + * be restored by a subsequent call to ::cuCtxPopCurrent(). + * + * The type and the amount of execution resource the context can use is limited by \p paramsArray + * and \p numParams. The \p paramsArray is an array of \p CUexecAffinityParam and the \p numParams + * describes the size of the array. If two \p CUexecAffinityParam in the array have the same type, + * the latter execution affinity parameter overrides the former execution affinity parameter. + * The supported execution affinity types are: + * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion + * of SMs is specified as the number of SMs via \p CUexecAffinitySmCount. This limit will be internally + * rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution + * affinity of the context via \p cuCtxGetExecAffinity after context creation. Currently, this attribute + * is only supported under Volta+ MPS. + * + * The three LSBs of the \p flags parameter can be used to control how the OS + * thread, which owns the CUDA context at the time of an API call, interacts + * with the OS scheduler when waiting for results from the GPU. Only one of + * the scheduling flags can be set when creating a context. + * + * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for + * results from the GPU. This can decrease latency when waiting for the GPU, + * but may lower the performance of CPU threads if they are performing work in + * parallel with the CUDA thread. + * + * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for + * results from the GPU. This can increase latency when waiting for the GPU, + * but can increase the performance of CPU threads performing work in parallel + * with the GPU. + * + * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work. + * + * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a + * synchronization primitive when waiting for the GPU to finish work.
+ * Deprecated: This flag was deprecated as of CUDA 4.0 and was + * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. + * + * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, + * uses a heuristic based on the number of active CUDA contexts in the + * process \e C and the number of logical processors in the system \e P. If + * \e C > \e P, then CUDA will yield to other OS threads when waiting for + * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while + * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). + * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on + * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC + * for low-powered devices. + * + * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. + * This flag must be set in order to allocate pinned host memory that is + * accessible to the GPU. + * + * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory + * after resizing local memory for a kernel. This can prevent thrashing by + * local memory allocations when launching many kernels with high local + * memory usage at the cost of potentially increased memory usage.
+ * Deprecated: This flag is deprecated and the behavior enabled + * by this flag is now the default and cannot be disabled. + * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). + * + * - ::CU_CTX_COREDUMP_ENABLE: If GPU coredumps have not been enabled globally + * with ::cuCoredumpSetAttributeGlobal or environment variables, this flag can + * be set during context creation to instruct CUDA to create a coredump if + * this context raises an exception during execution. These environment variables + * are described in the CUDA-GDB user guide under the "GPU core dump support" + * section. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * + * - ::CU_CTX_USER_COREDUMP_ENABLE: If user-triggered GPU coredumps have not + * been enabled globally with ::cuCoredumpSetAttributeGlobal or environment + * variables, this flag can be set during context creation to instruct CUDA to + * create a coredump if data is written to a certain pipe that is present in the + * OS space. These environment variables are described in the CUDA-GDB user + * guide under the "GPU core dump support" section. + * It is important to note that the pipe name *must* be set with + * ::cuCoredumpSetAttributeGlobal before creating the context if this flag is + * used. Setting this flag implies that ::CU_CTX_COREDUMP_ENABLE is set. + * The initial attributes will be taken from the global attributes at the time of + * context creation. The other attributes that control coredump output can be + * modified by calling ::cuCoredumpSetAttribute from the created context after + * it becomes current. + * Setting this flag on any context creation is equivalent to setting the + * ::CU_COREDUMP_ENABLE_USER_TRIGGER attribute to \p true globally. + * + * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of + * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() + * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the + * compute mode of the device. The nvidia-smi tool can be used to set + * the compute mode for * devices. + * Documentation for nvidia-smi can be obtained by passing a + * -h option to it. 
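+ *
+ * A sketch of creating a context limited to a number of SMs (the field layout
+ * follows ::CUexecAffinityParam and ::CUexecAffinitySmCount, the requested
+ * count of 8 is an arbitrary example value, \c dev is assumed to come from
+ * ::cuDeviceGet(), and error handling is abbreviated):
+ *
+ * \code
+     CUexecAffinityParam affinity;
+     affinity.type = CU_EXEC_AFFINITY_TYPE_SM_COUNT;
+     affinity.param.smCount.val = 8;        // rounded up to a hardware-supported amount
+
+     CUcontext ctx = NULL;
+     cuCtxCreate_v3(&ctx, &affinity, 1, 0, dev);
+     // The granted SM count may differ from the request; query it afterwards
+     // with cuCtxGetExecAffinity() as recommended above.
+ * \endcode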
+ * + * \param pctx - Returned context handle of the new context + * \param paramsArray - Execution affinity parameters + * \param numParams - Number of execution affinity parameters + * \param flags - Context creation flags + * \param dev - Device to create context on + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCoredumpSetAttributeGlobal, + * ::cuCoredumpSetAttribute, + * ::CUexecAffinityParam + */ +CUresult CUDAAPI cuCtxCreate_v3(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice dev); + +/** + * \brief Destroy a CUDA context + * + * Destroys the CUDA context specified by \p ctx. The context \p ctx will be + * destroyed regardless of how many threads it is current to. + * It is the responsibility of the calling function to ensure that no API + * call issues using \p ctx while ::cuCtxDestroy() is executing. + * + * Destroys and cleans up all resources associated with the context. + * It is the caller's responsibility to ensure that the context or its resources + * are not accessed or passed in subsequent API calls and doing so will result in undefined behavior. + * These resources include CUDA types such as ::CUmodule, ::CUfunction, ::CUstream, ::CUevent, + * ::CUarray, ::CUmipmappedArray, ::CUtexObject, ::CUsurfObject, ::CUtexref, ::CUsurfref, + * ::CUgraphicsResource, ::CUlinkState, ::CUexternalMemory and ::CUexternalSemaphore. + * + * If \p ctx is current to the calling thread then \p ctx will also be + * popped from the current thread's context stack (as though ::cuCtxPopCurrent() + * were called). If \p ctx is current to other threads, then \p ctx will + * remain current to those threads, and attempting to access \p ctx from + * those threads will result in the error ::CUDA_ERROR_CONTEXT_IS_DESTROYED. + * + * \param ctx - Context to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); + +/** + * \brief Pushes a context on the current CPU thread + * + * Pushes the given context \p ctx onto the CPU thread's stack of current + * contexts. The specified context becomes the CPU thread's current context, so + * all CUDA functions that operate on the current context are affected. + * + * The previous current context may be made current again by calling + * ::cuCtxDestroy() or ::cuCtxPopCurrent(). 
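+ *
+ * A minimal illustrative sketch (error handling omitted): a thread can make
+ * \p ctx current temporarily and then restore the previous context:
+ * \code
+ * cuCtxPushCurrent(ctx);        // ctx is now current on this thread
+ * // ... issue work that uses ctx ...
+ * CUcontext popped;
+ * cuCtxPopCurrent(&popped);     // popped == ctx; the previous context is current again
+ * \endcode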
+ * + * \param ctx - Context to push + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); + +/** + * \brief Pops the current CUDA context from the current CPU thread. + * + * Pops the current CUDA context from the CPU thread and passes back the + * old context handle in \p *pctx. That context may then be made current + * to a different CPU thread by calling ::cuCtxPushCurrent(). + * + * If a context was current to the CPU thread before ::cuCtxCreate() or + * ::cuCtxPushCurrent() was called, this function makes that context current to + * the CPU thread again. + * + * \param pctx - Returned popped context handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); + +/** + * \brief Binds the specified CUDA context to the calling CPU thread + * + * Binds the specified CUDA context to the calling CPU thread. + * If \p ctx is NULL then the CUDA context previously bound to the + * calling CPU thread is unbound and ::CUDA_SUCCESS is returned. + * + * If there exists a CUDA context stack on the calling CPU thread, this + * will replace the top of that stack with \p ctx. + * If \p ctx is NULL then this will be equivalent to popping the top + * of the calling CPU thread's CUDA context stack (or a no-op if the + * calling CPU thread's CUDA context stack is empty). + * + * \param ctx - Context to bind to the calling CPU thread + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa + * ::cuCtxGetCurrent, + * ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cudaSetDevice + */ +CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx); + +/** + * \brief Returns the CUDA context bound to the calling CPU thread. + * + * Returns in \p *pctx the CUDA context bound to the calling CPU thread. + * If no context is bound to the calling CPU thread then \p *pctx is + * set to NULL and ::CUDA_SUCCESS is returned. + * + * \param pctx - Returned context handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * \notefnerr + * + * \sa + * ::cuCtxSetCurrent, + * ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cudaGetDevice + */ +CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx); + +/** + * \brief Returns the device ID for the current context + * + * Returns in \p *device the ordinal of the current context's device. 
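+ *
+ * For illustration only (error handling omitted; \c ctx is assumed to be an
+ * existing context), binding a context and querying its device might look like:
+ * \code
+ * cuCtxSetCurrent(ctx);         // bind ctx to the calling CPU thread
+ *
+ * CUdevice dev;
+ * cuCtxGetDevice(&dev);         // dev is the device ctx was created on
+ * \endcode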
+ * + * \param device - Returned device ID for the current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaGetDevice + */ +CUresult CUDAAPI cuCtxGetDevice(CUdevice *device); + +/** + * \brief Returns the flags for the current context + * + * Returns in \p *flags the flags of the current context. See ::cuCtxCreate + * for flag values. + * + * \param flags - Pointer to store flags of current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetCurrent, + * ::cuCtxGetDevice, + * ::cuCtxGetLimit, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxGetStreamPriorityRange, + * ::cuCtxSetFlags, + * ::cudaGetDeviceFlags + */ +CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags); + +/** + * \brief Sets the flags for the current context + * + * \param flags - Flags to set on the current context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetCurrent, + * ::cuCtxGetDevice, + * ::cuCtxGetLimit, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxGetStreamPriorityRange, + * ::cuCtxGetFlags, + * ::cudaGetDeviceFlags, + * ::cuDevicePrimaryCtxSetFlags, + */ +CUresult CUDAAPI cuCtxSetFlags(unsigned int flags); + +/** + * \brief Returns the unique Id associated with the context supplied + * + * Returns in \p ctxId the unique Id which is associated with a given context. + * The Id is unique for the life of the program for this instance of CUDA. + * If context is supplied as NULL and there is one current, the Id of the + * current context is returned. + * + * \param ctx - Context for which to obtain the Id + * \param ctxId - Pointer to store the Id of the context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPushCurrent + */ +CUresult CUDAAPI cuCtxGetId(CUcontext ctx, unsigned long long *ctxId); + +/** + * \brief Block for a context's tasks to complete + * + * Blocks until the device has completed all preceding requested tasks. + * ::cuCtxSynchronize() returns an error if one of the preceding tasks failed. + * If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the + * CPU thread will block until the GPU context has finished its work. 
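+ *
+ * A minimal illustrative sketch (error handling simplified; \c kernel, \c stream,
+ * \c args and the launch dimensions are assumed to exist) of waiting for all
+ * queued work to finish:
+ * \code
+ * cuLaunchKernel(kernel, gridX, 1, 1, blockX, 1, 1, 0, stream, args, NULL);
+ *
+ * CUresult res = cuCtxSynchronize();   // blocks until all preceding tasks complete
+ * if (res != CUDA_SUCCESS) {
+ *     // one of the preceding tasks failed
+ * }
+ * \endcode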
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cudaDeviceSynchronize + */ +CUresult CUDAAPI cuCtxSynchronize(void); + +/** + * \brief Set resource limits + * + * Setting \p limit to \p value is a request by the application to update + * the current limit maintained by the context. The driver is free to + * modify the requested value to meet h/w requirements (this could be + * clamping to minimum or maximum values, rounding up to nearest element + * size, etc). The application can use ::cuCtxGetLimit() to find out exactly + * what the limit has been set to. + * + * Setting each ::CUlimit has its own specific restrictions, so each is + * discussed here. + * + * - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. + * The driver automatically increases the per-thread stack size + * for each kernel launch as needed. This size isn't reset back to the + * original value after each launch. Setting this value will take effect + * immediately, and if necessary, the device will block until all preceding + * requested tasks are complete. + * + * - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used + * by the ::printf() device system call. Setting ::CU_LIMIT_PRINTF_FIFO_SIZE + * must be performed before launching any kernel that uses the ::printf() + * device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used + * by the ::malloc() and ::free() device system calls. Setting + * ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel + * that uses the ::malloc() or ::free() device system calls, otherwise + * ::CUDA_ERROR_INVALID_VALUE will be returned. + * + * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of + * a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting + * this limit must be performed before any launch of a kernel that uses the + * device runtime and calls ::cudaDeviceSynchronize() above the default sync + * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail + * with error code ::cudaErrorSyncDepthExceeded if the limitation is + * violated. This limit can be set smaller than the default or up the maximum + * launch depth of 24. When setting this limit, keep in mind that additional + * levels of sync depth require the driver to reserve large amounts of device + * memory which can no longer be used for user allocations. If these + * reservations of device memory fail, ::cuCtxSetLimit() will return + * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability < 9.0. + * Attempting to set this limit on devices of other compute capability + * versions will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + * returned. + * + * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of + * outstanding device runtime launches that can be made from the current + * context. A grid is outstanding from the point of launch up until the grid + * is known to have been completed. 
Device runtime launches which violate + * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when + * ::cudaGetLastError() is called after launch. If more pending launches than + * the default (2048 launches) are needed for a module using the device + * runtime, this limit can be increased. Keep in mind that being able to + * sustain additional pending launches will require the driver to reserve + * larger amounts of device memory upfront which can no longer be used for + * allocations. If these reservations fail, ::cuCtxSetLimit() will return + * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. + * This limit is only applicable to devices of compute capability 3.5 and + * higher. Attempting to set this limit on devices of compute capability less + * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being + * returned. + * + * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. + * Values can range from 0B to 128B. This is purely a performance hint and + * it can be ignored or clamped depending on the platform. + * + * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls size in bytes available for + * persisting L2 cache. This is purely a performance hint and it can be + * ignored or clamped depending on the platform. + * + * \param limit - Limit to set + * \param value - Size of limit + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_LIMIT, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSynchronize, + * ::cudaDeviceSetLimit + */ +CUresult CUDAAPI cuCtxSetLimit(CUlimit limit, size_t value); + +/** + * \brief Returns resource limits + * + * Returns in \p *pvalue the current size of \p limit. The supported + * ::CUlimit values are: + * - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread. + * - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the + * ::printf() device system call. + * - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the + * ::malloc() and ::free() device system calls. + * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread + * can issue the device runtime call ::cudaDeviceSynchronize() to wait on + * child grid launches to complete. + * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding + * device runtime launches that can be made from this context. + * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity. + * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE: Persisting L2 cache size in bytes + * + * \param limit - Limit to query + * \param pvalue - Returned size of limit + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_LIMIT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceGetLimit + */ +CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit); + +/** + * \brief Returns the preferred cache configuration for the current context. 
+ * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this function returns through \p pconfig the preferred cache configuration + * for the current context. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute functions. + * + * This will return a \p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices + * where the size of the L1 cache and shared memory are fixed. + * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param pconfig - Returned cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceGetCacheConfig + */ +CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig); + +/** + * \brief Sets the preferred cache configuration for the current context. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the current context. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute the function. Any function preference + * set via ::cuFuncSetCacheConfig() or ::cuKernelSetCacheConfig() will be preferred over this context-wide + * setting. Setting the context-wide cache configuration to + * ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer + * to not change the cache configuration unless required to launch the kernel. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param config - Requested cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceSetCacheConfig, + * ::cuKernelSetCacheConfig + */ +CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config); + +/** + * \brief Returns the current shared memory configuration for the current context. + * + * This function will return in \p pConfig the current size of shared memory banks + * in the current context. On devices with configurable shared memory banks, + * ::cuCtxSetSharedMemConfig can be used to change this setting, so that all + * subsequent kernel launches will by default use the new bank size. When + * ::cuCtxGetSharedMemConfig is called on devices without configurable shared + * memory, it will return the fixed bank size of the hardware. + * + * The returned bank configurations can be either: + * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is + * four bytes. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width is + * eight bytes. + * + * \param pConfig - returned shared memory configuration + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCtxGetSharedMemConfig, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceGetSharedMemConfig + */ +CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig); + +/** + * \brief Sets the shared memory configuration for the current context. + * + * On devices with configurable shared memory banks, this function will set + * the context's shared memory bank size which is used for subsequent kernel + * launches. + * + * Changing the shared memory configuration between launches may insert a device + * side synchronization point between those launches. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial + * setting (currently, four bytes).
+ * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + * be natively four bytes. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + * be natively eight bytes. + * + * \param config - requested shared memory configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cuCtxGetSharedMemConfig, + * ::cuFuncSetCacheConfig, + * ::cudaDeviceSetSharedMemConfig + */ +CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config); + +/** + * \brief Gets the context's API version. + * + * Returns a version number in \p version corresponding to the capabilities of + * the context (e.g. 3010 or 3020), which library developers can use to direct + * callers to a specific API version. If \p ctx is NULL, returns the API version + * used to create the currently bound context. + * + * Note that new API versions are only introduced when context capabilities are + * changed that break binary compatibility, so the API version and driver version + * may be different. For example, it is valid for the API version to be 3020 while + * the driver version is 4020. + * + * \param ctx - Context to check + * \param version - Pointer to version + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version); + +/** + * \brief Returns numerical values that correspond to the least and + * greatest stream priorities. + * + * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond + * to the least and greatest stream priorities respectively. Stream priorities + * follow a convention where lower numbers imply greater priorities. The range of + * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. + * If the user attempts to create a stream with a priority value that is + * outside the meaningful range as specified by this API, the priority is + * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority + * respectively. See ::cuStreamCreateWithPriority for details on creating a + * priority stream. + * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value + * is not desired. + * + * This function will return '0' in both \p *leastPriority and \p *greatestPriority if + * the current context's device does not support stream priorities + * (see ::cuDeviceGetAttribute). 
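+ *
+ * For illustration only (error handling omitted), creating a stream with the
+ * greatest allowed priority might look like:
+ * \code
+ * int leastPriority, greatestPriority;
+ * cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
+ *
+ * CUstream stream;
+ * cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, greatestPriority);
+ * \endcode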
+ * + * \param leastPriority - Pointer to an int in which the numerical value for least + * stream priority is returned + * \param greatestPriority - Pointer to an int in which the numerical value for greatest + * stream priority is returned + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize, + * ::cudaDeviceGetStreamPriorityRange + */ +CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority); + +/** + * \brief Resets all persisting lines in cache to normal status. + * + * ::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal + * status. Takes effect on function return. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuCtxResetPersistingL2Cache(void); + +/** + * \brief Returns the execution affinity setting for the current context. + * + * Returns in \p *pExecAffinity the current value of \p type. The supported + * ::CUexecAffinityType values are: + * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: number of SMs the context is limited to use. + * + * \param type - Execution affinity type to query + * \param pExecAffinity - Returned execution affinity + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY + * \notefnerr + * + * \sa + * ::CUexecAffinityParam + */ +CUresult CUDAAPI cuCtxGetExecAffinity(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type); + + +/** @} */ /* END CUDA_CTX */ + +/** + * \defgroup CUDA_CTX_DEPRECATED Context Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated context management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated context management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Increment a context's usage-count + * + * \deprecated + * + * Note that this function is deprecated and should not be used. + * + * Increments the usage count of the context and passes back a context handle + * in \p *pctx that must be passed to ::cuCtxDetach() when the application is + * done with the context. ::cuCtxAttach() fails if there is no context current + * to the thread. + * + * Currently, the \p flags parameter must be 0. + * + * \param pctx - Returned context handle of the current context + * \param flags - Context attach flags (must be 0) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxDetach, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuCtxAttach(CUcontext *pctx, unsigned int flags); + +/** + * \brief Decrement a context's usage-count + * + * \deprecated + * + * Note that this function is deprecated and should not be used. 
+ * + * Decrements the usage count of the context \p ctx, and destroys the context + * if the usage count goes to 0. The context must be a handle that was passed + * back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the + * calling thread. + * + * \param ctx - Context to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxCreate, + * ::cuCtxDestroy, + * ::cuCtxGetApiVersion, + * ::cuCtxGetCacheConfig, + * ::cuCtxGetDevice, + * ::cuCtxGetFlags, + * ::cuCtxGetLimit, + * ::cuCtxPopCurrent, + * ::cuCtxPushCurrent, + * ::cuCtxSetCacheConfig, + * ::cuCtxSetLimit, + * ::cuCtxSynchronize + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuCtxDetach(CUcontext ctx); + +/** @} */ /* END CUDA_CTX_DEPRECATED */ + + +/** + * \defgroup CUDA_MODULE Module Management + * + * ___MANBRIEF___ module management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the module management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Loads a compute module + * + * Takes a filename \p fname and loads the corresponding module \p module into + * the current context. The CUDA driver API does not attempt to lazily + * allocate the resources needed by a module; if the memory for functions and + * data (constant and global) needed by the module cannot be allocated, + * ::cuModuleLoad() fails. The file should be a \e cubin file as output by + * \b nvcc, or a \e PTX file either as output by \b nvcc or handwritten, or + * a \e fatbin file as output by \b nvcc from toolchain 4.0 or later. + * + * \param module - Returned module + * \param fname - Filename of module to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_FILE_NOT_FOUND, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname); + +/** + * \brief Load a module's data + * + * Takes a pointer \p image and loads the corresponding module \p module into + * the current context. The pointer may be obtained by mapping a \e cubin or + * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file + * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin + * object into the executable resources and using operating system calls such + * as Windows \c FindResource() to obtain the pointer. 
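+ *
+ * A minimal illustrative sketch (error handling omitted; \c ptxSource is assumed
+ * to be a NULL-terminated PTX string, e.g. produced by \b nvcc with \c -ptx, and
+ * the kernel name is hypothetical):
+ * \code
+ * CUmodule module;
+ * cuModuleLoadData(&module, ptxSource);
+ *
+ * CUfunction kernel;
+ * cuModuleGetFunction(&kernel, module, "myKernel");
+ * \endcode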
+ * + * \param module - Returned module + * \param image - Module data to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image); + +/** + * \brief Load a module's data with options + * + * Takes a pointer \p image and loads the corresponding module \p module into + * the current context. The pointer may be obtained by mapping a \e cubin or + * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file + * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin + * object into the executable resources and using operating system calls such + * as Windows \c FindResource() to obtain the pointer. Options are passed as + * an array via \p options and any corresponding parameters are passed in + * \p optionValues. The number of total options is supplied via \p numOptions. + * Any outputs will be returned via \p optionValues. + * + * \param module - Returned module + * \param image - Module data to load + * \param numOptions - Number of options + * \param options - Options for JIT + * \param optionValues - Option values for JIT + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Load a module's data + * + * Takes a pointer \p fatCubin and loads the corresponding module \p module + * into the current context. The pointer represents a fat binary object, + * which is a collection of different \e cubin and/or \e PTX files, all + * representing the same device code, but compiled and optimized for different + * architectures. + * + * Prior to CUDA 4.0, there was no documented API for constructing and using + * fat binary objects by programmers. Starting with CUDA 4.0, fat binary + * objects can be constructed by providing the -fatbin option to \b nvcc. + * More information can be found in the \b nvcc document. 
+ * + * \param module - Returned module + * \param fatCubin - Fat binary to load + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleLoadFatBinary(CUmodule *module, const void *fatCubin); + +/** + * \brief Unloads a module + * + * Unloads a module \p hmod from the current context. + * + * \param hmod - Module to unload + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_destroy_ub + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary + */ +CUresult CUDAAPI cuModuleUnload(CUmodule hmod); + +/** + * CUDA Lazy Loading status + */ +typedef enum CUmoduleLoadingMode_enum { + CU_MODULE_EAGER_LOADING = 0x1, /**< Lazy Kernel Loading is not enabled */ + CU_MODULE_LAZY_LOADING = 0x2, /**< Lazy Kernel Loading is enabled */ +} CUmoduleLoadingMode; + +/** + * \brief Query lazy loading mode + * + * Returns lazy loading mode + * Module loading mode is controlled by CUDA_MODULE_LOADING env variable + * + * \param mode - Returns the lazy loading mode + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \notefnerr + * + * \sa + * ::cuModuleLoad, + */ +CUresult CUDAAPI cuModuleGetLoadingMode(CUmoduleLoadingMode *mode); + +/** + * \brief Returns a function handle + * + * Returns in \p *hfunc the handle of the function of name \p name located in + * module \p hmod. If no function of that name exists, ::cuModuleGetFunction() + * returns ::CUDA_ERROR_NOT_FOUND. + * + * \param hfunc - Returned function handle + * \param hmod - Module to retrieve function from + * \param name - Name of function to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); + +/** + * \brief Returns a global pointer from a module + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the + * global of name \p name located in module \p hmod. If no variable of that name + * exists, ::cuModuleGetGlobal() returns ::CUDA_ERROR_NOT_FOUND. + * One of the parameters \p dptr or \p bytes (not both) can be NULL in which + * case it is ignored. 
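+ *
+ * For illustration only (error handling omitted; \c hmod is an already-loaded
+ * module and the global name "result" is hypothetical), looking up a module
+ * global and copying it back to the host:
+ * \code
+ * CUdeviceptr dptr;
+ * size_t bytes;
+ * cuModuleGetGlobal(&dptr, &bytes, hmod, "result");
+ *
+ * int hostValue = 0;
+ * cuMemcpyDtoH(&hostValue, dptr, sizeof(hostValue));
+ * \endcode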
+ * + * \param dptr - Returned global device pointer + * \param bytes - Returned global size in bytes + * \param hmod - Module to retrieve global from + * \param name - Name of global to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa ::cuModuleGetFunction, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload, + * ::cudaGetSymbolAddress, + * ::cudaGetSymbolSize + */ +CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name); + +/** + * \brief Creates a pending JIT linker invocation. + * + * If the call is successful, the caller owns the returned CUlinkState, which + * should eventually be destroyed with ::cuLinkDestroy. The + * device code machine size (32 or 64 bit) will match the calling application. + * + * Both linker and compiler options may be specified. Compiler options will + * be applied to inputs to this linker action which must be compiled from PTX. + * The options ::CU_JIT_WALL_TIME, + * ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES + * will accumulate data until the CUlinkState is destroyed. + * + * \p optionValues must remain valid for the life of the CUlinkState if output + * options are used. No other references to inputs are maintained after this + * call returns. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param numOptions Size of options arrays + * \param options Array of linker and compiler options + * \param optionValues Array of option values, each cast to void * + * \param stateOut On success, this will contain a CUlinkState to specify + * and complete this action + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * \notefnerr + * + * \sa ::cuLinkAddData, + * ::cuLinkAddFile, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + +/** + * \brief Add an input to a pending linker invocation + * + * Ownership of \p data is retained by the caller. No reference is retained to any + * inputs after this call returns. + * + * This method accepts only compiler options, which are used if the data must + * be compiled from PTX, and does not accept any of + * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param state A pending linker action. + * \param type The type of the input data. + * \param data The input data. PTX must be NULL-terminated. + * \param size The length of the input data. + * \param name An optional name for this input in log messages. + * \param numOptions Size of options. + * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate). + * \param optionValues Array of option values, each cast to void *. 
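+ *
+ * A minimal illustrative sketch of the overall link flow (error handling omitted;
+ * \c ptxSource is an assumed NULL-terminated PTX string and the input name is
+ * arbitrary):
+ * \code
+ * CUlinkState state;
+ * cuLinkCreate(0, NULL, NULL, &state);
+ * cuLinkAddData(state, CU_JIT_INPUT_PTX, (void *)ptxSource,
+ *               strlen(ptxSource) + 1, "my_ptx", 0, NULL, NULL);
+ *
+ * void *cubin; size_t cubinSize;
+ * cuLinkComplete(state, &cubin, &cubinSize);
+ *
+ * CUmodule module;
+ * cuModuleLoadData(&module, cubin);    // load the image before destroying state
+ * cuLinkDestroy(state);
+ * \endcode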
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU + * + * \sa ::cuLinkCreate, + * ::cuLinkAddFile, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, + unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Add a file input to a pending linker invocation + * + * No reference is retained to any inputs after this call returns. + * + * This method accepts only compiler options, which are used if the input + * must be compiled from PTX, and does not accept any of + * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, + * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. + * + * This method is equivalent to invoking ::cuLinkAddData on the contents + * of the file. + * + * \note For LTO-IR input, only LTO-IR compiled with toolkits prior to CUDA 12.0 will be accepted + * + * \param state A pending linker action + * \param type The type of the input data + * \param path Path to the input file + * \param numOptions Size of options + * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate) + * \param optionValues Array of option values, each cast to void * + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_FILE_NOT_FOUND + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU + * + * \sa ::cuLinkCreate, + * ::cuLinkAddData, + * ::cuLinkComplete, + * ::cuLinkDestroy + */ +CUresult CUDAAPI +cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, + unsigned int numOptions, CUjit_option *options, void **optionValues); + +/** + * \brief Complete a pending linker invocation + * + * Completes the pending linker action and returns the cubin image for the linked + * device code, which can be used with ::cuModuleLoadData. The cubin is owned by + * \p state, so it should be loaded before \p state is destroyed via ::cuLinkDestroy. + * This call does not destroy \p state. + * + * \param state A pending linker invocation + * \param cubinOut On success, this will point to the output image + * \param sizeOut Optional parameter to receive the size of the generated image + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuLinkCreate, + * ::cuLinkAddData, + * ::cuLinkAddFile, + * ::cuLinkDestroy, + * ::cuModuleLoadData + */ +CUresult CUDAAPI +cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut); + +/** + * \brief Destroys state for a JIT linker invocation. 
+ * + * \param state State object for the linker invocation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE + * + * \sa ::cuLinkCreate + */ +CUresult CUDAAPI +cuLinkDestroy(CUlinkState state); + +/** @} */ /* END CUDA_MODULE */ + +/** + * \defgroup CUDA_MODULE_DEPRECATED Module Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated module management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated module management functions of the low-level + * CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns a handle to a texture reference + * + * \deprecated + * + * Returns in \p *pTexRef the handle of the texture reference of name \p name + * in the module \p hmod. If no texture reference of that name exists, + * ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. This texture reference + * handle should not be destroyed, since it will be destroyed when the module + * is unloaded. + * + * \param pTexRef - Returned texture reference + * \param hmod - Module to retrieve texture reference from + * \param name - Name of texture reference to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa + * ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetSurfRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name); + +/** + * \brief Returns a handle to a surface reference + * + * \deprecated + * + * Returns in \p *pSurfRef the handle of the surface reference of name \p name + * in the module \p hmod. If no surface reference of that name exists, + * ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pSurfRef - Returned surface reference + * \param hmod - Module to retrieve surface reference from + * \param name - Name of surface reference to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + * \sa + * ::cuModuleGetFunction, + * ::cuModuleGetGlobal, + * ::cuModuleGetTexRef, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx, + * ::cuModuleLoadFatBinary, + * ::cuModuleUnload + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuModuleGetSurfRef(CUsurfref *pSurfRef, CUmodule hmod, const char *name); + +/** @} */ /* END CUDA_MODULE_DEPRECATED */ + +/** + * \defgroup CUDA_LIBRARY Library Management + * + * ___MANBRIEF___ library management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the library management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Load a library with specified code and options + * + * Takes a pointer \p code and loads the corresponding library \p library into + * all contexts existent at the time of the call and future contexts at the time + * of creation until the library is unloaded with ::cuLibraryUnload(). 
+ * + * The pointer may be obtained by mapping a \e cubin or \e PTX or \e fatbin file, + * passing a \e cubin or \e PTX or \e fatbin file as a NULL-terminated text string, or + * incorporating a \e cubin or \e fatbin object into the executable resources and + * using operating system calls such as Windows \c FindResource() to obtain the pointer. + * + * Options are passed as an array via \p jitOptions and any corresponding parameters are passed in + * \p jitOptionsValues. The number of total JIT options is supplied via \p numJitOptions. + * Any outputs will be returned via \p jitOptionsValues. + * + * Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + * \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. + * + * \param library - Returned library + * \param code - Code to load + * \param jitOptions - Options for JIT + * \param jitOptionsValues - Option values for JIT + * \param numJitOptions - Number of options + * \param libraryOptions - Options for loading + * \param libraryOptionValues - Option values for loading + * \param numLibraryOptions - Number of options for loading + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * + * \sa ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx + */ +CUresult CUDAAPI cuLibraryLoadData(CUlibrary *library, const void *code, + CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, + CUlibraryOption *libraryOptions, void** libraryOptionValues, unsigned int numLibraryOptions); + +/** + * \brief Load a library with specified file and options + * + * Takes a filename \p fileName and loads the corresponding library \p library into + * all contexts existent at the time of the call and future contexts at the time of + * creation until the library is unloaded with ::cuLibraryUnload(). + * + * The file should be a \e cubin file as output by \b nvcc, or a \e PTX file either + * as output by \b nvcc or handwritten, or a \e fatbin file as output by \b nvcc + * from toolchain 4.0 or later. + * + * Options are passed as an array via \p jitOptions and any corresponding parameters are + * passed in \p jitOptionsValues. The number of total options is supplied via \p numJitOptions. + * Any outputs will be returned via \p jitOptionsValues. + * + * Library load options are passed as an array via \p libraryOptions and any corresponding parameters are passed in + * \p libraryOptionValues. The number of total library load options is supplied via \p numLibraryOptions. 
+ * + * \param library - Returned library + * \param fileName - File to load from + * \param jitOptions - Options for JIT + * \param jitOptionsValues - Option values for JIT + * \param numJitOptions - Number of options + * \param libraryOptions - Options for loading + * \param libraryOptionValues - Option values for loading + * \param numLibraryOptions - Number of options for loading + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_PTX, + * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NO_BINARY_FOR_GPU, + * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryUnload, + * ::cuModuleLoad, + * ::cuModuleLoadData, + * ::cuModuleLoadDataEx + */ +CUresult CUDAAPI cuLibraryLoadFromFile(CUlibrary *library, const char *fileName, + CUjit_option *jitOptions, void **jitOptionsValues, unsigned int numJitOptions, + CUlibraryOption *libraryOptions, void **libraryOptionValues, unsigned int numLibraryOptions); + +/** + * \brief Unloads a library + * + * Unloads the library specified with \p library + * + * \param library - Library to unload + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuModuleUnload + */ +CUresult CUDAAPI cuLibraryUnload(CUlibrary library); + +/** + * \brief Returns a kernel handle + * + * Returns in \p pKernel the handle of the kernel with name \p name located in library \p library. + * If kernel handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pKernel - Returned kernel handle + * \param library - Library to retrieve kernel from + * \param name - Name of kernel to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuLibraryGetKernel(CUkernel *pKernel, CUlibrary library, const char *name); + +/** + * \brief Returns a module handle + * + * Returns in \p pMod the module handle associated with the current context located in + * library \p library. If module handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param pMod - Returned module handle + * \param library - Library to retrieve module from + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuLibraryGetModule(CUmodule *pMod, CUlibrary library); + +/** + * \brief Returns a function handle + * + * Returns in \p pFunc the handle of the function for the requested kernel \p kernel and + * the current context. If function handle is not found, the call returns ::CUDA_ERROR_NOT_FOUND. 
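+ *
+ * For illustration only (error handling omitted; the file and kernel names are
+ * hypothetical), retrieving a per-context function handle from a library:
+ * \code
+ * CUlibrary library;
+ * cuLibraryLoadFromFile(&library, "kernels.fatbin", NULL, NULL, 0, NULL, NULL, 0);
+ *
+ * CUkernel kernel;
+ * cuLibraryGetKernel(&kernel, library, "myKernel");
+ *
+ * CUfunction func;                     // valid for the current context
+ * cuKernelGetFunction(&func, kernel);
+ * \endcode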
+ * + * \param pFunc - Returned function handle + * \param kernel - Kernel to retrieve function for the requested context + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuLibraryGetKernel, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction + */ +CUresult CUDAAPI cuKernelGetFunction(CUfunction *pFunc, CUkernel kernel); + +/** + * \brief Returns a global device pointer + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the global with + * name \p name for the requested library \p library and the current context. + * If no global for the requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. + * One of the parameters \p dptr or \p bytes (not both) can be NULL in which + * case it is ignored. + * + * \param dptr - Returned global device pointer for the requested context + * \param bytes - Returned global size in bytes + * \param library - Library to retrieve global from + * \param name - Name of global to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuLibraryGetModule, + * ::cuModuleGetGlobal + */ +CUresult CUDAAPI cuLibraryGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); + +/** + * \brief Returns a pointer to managed memory + * + * Returns in \p *dptr and \p *bytes the base pointer and size of the managed memory with + * name \p name for the requested library \p library. If no managed memory with the + * requested name \p name exists, the call returns ::CUDA_ERROR_NOT_FOUND. One of the parameters + * \p dptr or \p bytes (not both) can be NULL in which case it is ignored. + * Note that managed memory for library \p library is shared across devices and is registered + * when the library is loaded into at least one context. + * + * \note The API requires a CUDA context to be present and initialized on at least one device. + * If no context is present, the call returns ::CUDA_ERROR_NOT_FOUND. + * + * \param dptr - Returned pointer to the managed memory + * \param bytes - Returned memory size in bytes + * \param library - Library to retrieve managed memory from + * \param name - Name of managed memory to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + */ +CUresult CUDAAPI cuLibraryGetManaged(CUdeviceptr *dptr, size_t *bytes, CUlibrary library, const char *name); + +/** + * \brief Returns a pointer to a unified function + * + * Returns in \p *fptr the function pointer to a unified function denoted by \p symbol. + * If no unified function with name \p symbol exists, the call returns ::CUDA_ERROR_NOT_FOUND. + * If there is no device with attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS present in the system, + * the call may return ::CUDA_ERROR_NOT_FOUND.
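+ *
+ * A minimal illustrative sketch (error handling omitted; \c dev and \c library
+ * are assumed to exist and the symbol name is hypothetical):
+ * \code
+ * int supported = 0;
+ * cuDeviceGetAttribute(&supported, CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS, dev);
+ *
+ * void *fptr = NULL;
+ * if (supported)
+ *     cuLibraryGetUnifiedFunction(&fptr, library, "myUnifiedFunction");
+ * \endcode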
+ * + * \param fptr - Returned pointer to a unified function + * \param library - Library to retrieve function pointer memory from + * \param symbol - Name of function pointer to retrieve + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_FOUND, + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + */ +CUresult CUDAAPI cuLibraryGetUnifiedFunction(void **fptr, CUlibrary library, const char *symbol); + +/** + * \brief Returns information about a kernel + * + * Returns in \p *pi the integer value of the attribute \p attrib for the kernel + * \p kernel for the requested device \p dev. The supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + * per block, beyond which a launch of the kernel would fail. This number + * depends on both the kernel and the requested device. + * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + * statically-allocated shared memory per block required by this kernel. + * This does not include dynamically-allocated shared memory requested by + * the user at runtime. + * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + * constant memory required by this kernel. + * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + * used by each thread of this kernel. + * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + * of this kernel. + * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + * which the kernel was compiled. This value is the major PTX version * 10 + * + the minor PTX version, so a PTX version 1.3 function would return the + * value 13. Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + * which the kernel was compiled. This value is the major binary + * version * 10 + the minor binary version, so a binary version 1.3 function + * would return the value 13. Note that this will return a value of 10 for + * legacy cubins that do not have a properly-encoded binary architecture + * version. + * - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the kernel has + * been compiled with user specified option "-Xptxas --dlcm=ca" set. + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + * dynamically-allocated shared memory. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + * cache split ratio in percent of total shared memory. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + * kernel must launch with a valid cluster size specified. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + * the function can be launched with non-portable cluster size. 1 is allowed, + * 0 is disallowed. A non-portable cluster size may only function on the + * specific SKUs the program is tested on. The launch might fail if the + * program is run on a different hardware platform. 
CUDA API provides + * cudaOccupancyMaxActiveClusters to assist with checking whether the desired + * size can be launched on the current device. A portable cluster size is + * guaranteed to be functional on all compute capabilities higher than the + * target compute capability. The portable cluster size for sm_90 is 8 blocks + * per cluster. This value may increase for future compute capabilities. The + * specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \note If another thread is trying to set the same attribute on the same device using + * ::cuKernelSetAttribute() simultaneously, the attribute query will give the old or new + * value depending on the interleavings chosen by the OS scheduler and memory consistency. + * + * \param pi - Returned attribute value + * \param attrib - Attribute requested + * \param kernel - Kernel to query attribute of + * \param dev - Device to query attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelSetAttribute, + * ::cuLibraryGetKernel, + * ::cuLaunchKernel, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuKernelGetAttribute(int *pi, CUfunction_attribute attrib, CUkernel kernel, CUdevice dev); + +/** + * \brief Sets information about a kernel + * + * This call sets the value of a specified attribute \p attrib on the kernel \p kernel + * for the requested device \p dev to an integer value specified by \p val. + * This function returns CUDA_SUCCESS if the new value of the attribute could be + * successfully set. If the set fails, this call will return an error. + * Not all attributes can have values set. Attempting to set a value on a read-only + * attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + * + * Note that attributes set using ::cuFuncSetAttribute() will override the attribute + * set by this API irrespective of whether the call to ::cuFuncSetAttribute() is made + * before or after this API call. However, ::cuKernelGetAttribute() will always + * return the attribute value set by this API. + * + * Supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This is the maximum size in bytes of + * dynamically-allocated shared memory. The value should contain the requested + * maximum size of dynamically-allocated shared memory. The sum of this value and + * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + * The maximal size of requestable dynamic shared memory may differ by GPU + * architecture. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + * cache and shared memory use the same hardware resources, this sets the shared memory + * carveout preference, in percent of the total shared memory. + * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + * This is only a hint, and the driver can choose a different ratio if required to execute the function. 
+ * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \note The API has stricter locking requirements in comparison to its legacy counterpart + * ::cuFuncSetAttribute() due to device-wide semantics. If multiple threads are trying to + * set the same attribute on the same device simultaneously, the attribute setting will depend + * on the interleavings chosen by the OS scheduler and memory consistency. + * + * \param attrib - Attribute requested + * \param val - Value to set + * \param kernel - Kernel to set attribute of + * \param dev - Device to set attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuLibraryLoadData, + * ::cuLibraryLoadFromFile, + * ::cuLibraryUnload, + * ::cuKernelGetAttribute, + * ::cuLibraryGetKernel, + * ::cuLaunchKernel, + * ::cuKernelGetFunction, + * ::cuLibraryGetModule, + * ::cuModuleGetFunction, + * ::cuFuncSetAttribute + */ +CUresult CUDAAPI cuKernelSetAttribute(CUfunction_attribute attrib, int val, CUkernel kernel, CUdevice dev); + +/** + * \brief Sets the preferred cache configuration for a device kernel. + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the device kernel \p kernel on the requested device \p dev. This is only a preference. + * The driver will use the requested configuration if possible, but it is free to choose a different + * configuration if required to execute \p kernel. Any context-wide preference + * set via ::cuCtxSetCacheConfig() will be overridden by this per-kernel + * setting. + * + * Note that attributes set using ::cuFuncSetCacheConfig() will override the attribute + * set by this API irrespective of whether the call to ::cuFuncSetCacheConfig() is made + * before or after this API call. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
+ *
+ *
+ * The supported cache configurations are:
+ * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)
+ * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache
+ * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory
+ * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory
+ *
+ * \note The API has stricter locking requirements in comparison to its legacy counterpart
+ * ::cuFuncSetCacheConfig() due to device-wide semantics. If multiple threads are trying to
+ * set a config on the same device simultaneously, the cache config setting will depend
+ * on the interleavings chosen by the OS scheduler and memory consistency.
+ *
+ * \param kernel - Kernel to configure cache for
+ * \param config - Requested cache configuration
+ * \param dev - Device to set attribute of
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY
+ *
+ * \sa ::cuLibraryLoadData,
+ * ::cuLibraryLoadFromFile,
+ * ::cuLibraryUnload,
+ * ::cuLibraryGetKernel,
+ * ::cuKernelGetFunction,
+ * ::cuLibraryGetModule,
+ * ::cuModuleGetFunction,
+ * ::cuFuncSetCacheConfig,
+ * ::cuCtxSetCacheConfig,
+ * ::cuLaunchKernel
+ */
+CUresult CUDAAPI cuKernelSetCacheConfig(CUkernel kernel, CUfunc_cache config, CUdevice dev);
+
+/** @} */ /* END CUDA_LIBRARY */
+
+/**
+ * \defgroup CUDA_MEM Memory Management
+ *
+ * ___MANBRIEF___ memory management functions of the low-level CUDA driver API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the memory management functions of the low-level CUDA
+ * driver application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * \brief Gets free and total memory
+ *
+ * Returns in \p *total the total amount of memory available to the current context.
+ * Returns in \p *free the amount of memory on the device that is free according to the OS.
+ * CUDA is not guaranteed to be able to allocate all of the memory that the OS reports as free.
+ * In a multi-tenant situation, the free estimate returned is prone to a race condition:
+ * an allocation or free performed by a different process, or by a different thread in the
+ * same process, between the time the free memory is estimated and the time it is reported
+ * will cause the reported free value to deviate from the actual free memory.
+ *
+ * The integrated GPU on Tegra shares memory with the CPU and other components
+ * of the SoC. The free and total values returned by the API exclude
+ * the SWAP memory space maintained by the OS on some platforms.
+ * The OS may move some of the memory pages into the swap area as the GPU or
+ * CPU allocates or accesses memory. See the Tegra app note on how to calculate
+ * total and free memory on Tegra.
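+ *
+ * For example, a minimal sketch querying the values (assuming an active context):
+ * \code
+   size_t freeBytes = 0, totalBytes = 0;
+   if (cuMemGetInfo(&freeBytes, &totalBytes) == CUDA_SUCCESS) {
+       // freeBytes is only an estimate; other threads or processes may allocate concurrently
+   }
+ * \endcode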
+ * + * \param free - Returned free memory in bytes + * \param total - Returned total memory in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemGetInfo + */ +CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total); + +/** + * \brief Allocates device memory + * + * Allocates \p bytesize bytes of linear memory on the device and returns in + * \p *dptr a pointer to the allocated memory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p bytesize + * is 0, ::cuMemAlloc() returns ::CUDA_ERROR_INVALID_VALUE. + * + * \param dptr - Returned device pointer + * \param bytesize - Requested allocation size in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMalloc + */ +CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize); + +/** + * \brief Allocates pitched device memory + * + * Allocates at least \p WidthInBytes * \p Height bytes of linear memory on + * the device and returns in \p *dptr a pointer to the allocated memory. The + * function may pad the allocation to ensure that corresponding pointers in + * any given row will continue to meet the alignment requirements for + * coalescing as the address is updated from row to row. \p ElementSizeBytes + * specifies the size of the largest reads and writes that will be performed + * on the memory range. \p ElementSizeBytes may be 4, 8 or 16 (since coalesced + * memory transactions are not possible on other data sizes). If + * \p ElementSizeBytes is smaller than the actual read/write size of a kernel, + * the kernel will run correctly, but possibly at reduced speed. The pitch + * returned in \p *pPitch by ::cuMemAllocPitch() is the width in bytes of the + * allocation. 
The intended usage of pitch is as a separate parameter of the + * allocation, used to compute addresses within the 2D array. Given the row + * and column of an array element of type \b T, the address is computed as: + * \code + T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column; + * \endcode + * + * The pitch returned by ::cuMemAllocPitch() is guaranteed to work with + * ::cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is + * recommended that programmers consider performing pitch allocations using + * ::cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is + * especially true if the application will be performing 2D memory copies + * between different regions of device memory (whether linear memory or CUDA + * arrays). + * + * The byte alignment of the pitch returned by ::cuMemAllocPitch() is guaranteed + * to match or exceed the alignment requirement for texture binding with + * ::cuTexRefSetAddress2D(). + * + * \param dptr - Returned device pointer + * \param pPitch - Returned pitch of allocation in bytes + * \param WidthInBytes - Requested allocation width in bytes + * \param Height - Requested allocation height in rows + * \param ElementSizeBytes - Size of largest reads/writes for range + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMallocPitch + */ +CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); + +/** + * \brief Frees device memory + * + * Frees the memory space pointed to by \p dptr, which must have been returned + * by a previous call to one of the following memory allocation APIs - ::cuMemAlloc(), + * ::cuMemAllocPitch(), ::cuMemAllocManaged(), ::cuMemAllocAsync(), ::cuMemAllocFromPoolAsync() + * + * Note - This API will not perform any implict synchronization when the pointer was allocated with + * ::cuMemAllocAsync or ::cuMemAllocFromPoolAsync. Callers must ensure that all accesses to the + * pointer have completed before invoking ::cuMemFree. For best performance and memory reuse, users + * should use ::cuMemFreeAsync to free memory allocated via the stream ordered memory allocator. 
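+ *
+ * A minimal sketch pairing ::cuMemAlloc with ::cuMemFree (assuming an active context):
+ * \code
+   CUdeviceptr dptr;
+   if (cuMemAlloc(&dptr, 1024) == CUDA_SUCCESS) {
+       // ... use the allocation ...
+       cuMemFree(dptr);   // all accesses to dptr must have completed by this point
+   }
+ * \endcode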
+ * + * \param dptr - Pointer to memory to free + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemAllocManaged, ::cuMemAllocAsync, ::cuMemAllocFromPoolAsync, + * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, ::cuMemcpy3D, ::cuMemcpy3DAsync, + * ::cuMemcpyAtoA, ::cuMemcpyAtoD, ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + * ::cuMemcpyHtoAAsync, ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, ::cuMemFreeAsync, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFree + */ +CUresult CUDAAPI cuMemFree(CUdeviceptr dptr); + +/** + * \brief Get information on memory allocations + * + * Returns the base address in \p *pbase and size in \p *psize of the + * allocation by ::cuMemAlloc() or ::cuMemAllocPitch() that contains the input + * pointer \p dptr. Both parameters \p pbase and \p psize are optional. If one + * of them is NULL, it is ignored. + * + * \param pbase - Returned base address + * \param psize - Returned size of device memory allocation + * \param dptr - Device pointer to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_NOT_FOUND, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32 + */ +CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr); + +/** + * \brief Allocates page-locked host memory + * + * Allocates \p bytesize bytes of host memory that is page-locked and + * accessible to the device. The driver tracks the virtual memory ranges + * allocated with this function and automatically accelerates calls to + * functions such as ::cuMemcpy(). Since the memory can be accessed directly by + * the device, it can be read or written with much higher bandwidth than + * pageable memory obtained with functions such as ::malloc(). Allocating + * excessive amounts of memory with ::cuMemAllocHost() may degrade system + * performance, since it reduces the amount of memory available to the system + * for paging. As a result, this function is best used sparingly to allocate + * staging areas for data exchange between host and device. 
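+ *
+ * A minimal sketch (assuming an active context; the buffer is released with ::cuMemFreeHost):
+ * \code
+   void *hostBuf = NULL;
+   if (cuMemAllocHost(&hostBuf, 1 << 20) == CUDA_SUCCESS) {
+       // hostBuf is page-locked; copies such as cuMemcpyHtoD from it can be accelerated
+       cuMemFreeHost(hostBuf);
+   }
+ * \endcode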
+ * + * Note all host memory allocated using ::cuMemHostAlloc() will automatically + * be immediately accessible to all contexts on all devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + * The device pointer that may be used to access this host memory from those + * contexts is always equal to the returned host pointer \p *pp. + * See \ref CUDA_UNIFIED for additional details. + * + * \param pp - Returned host pointer to page-locked memory + * \param bytesize - Requested allocation size in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMallocHost + */ +CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize); + +/** + * \brief Frees page-locked host memory + * + * Frees the memory space pointed to by \p p, which must have been returned by + * a previous call to ::cuMemAllocHost(). + * + * \param p - Pointer to memory to free + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFreeHost + */ +CUresult CUDAAPI cuMemFreeHost(void *p); + +/** + * \brief Allocates page-locked host memory + * + * Allocates \p bytesize bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device, + * it can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between + * host and device. 
+ * + * The \p Flags parameter enables different options to be specified that + * affect the allocation, as follows. + * + * - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * + * - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address + * space. The device pointer to the memory may be obtained by calling + * ::cuMemHostGetDevicePointer(). + * + * - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined + * (WC). WC memory can be transferred across the PCI Express bus more + * quickly on some system configurations, but cannot be read efficiently by + * most CPUs. WC memory is a good option for buffers that will be written by + * the CPU and read by the GPU via mapped pinned memory or host->device + * transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for + * devices that do not support mapped pinned memory. The failure is deferred + * to ::cuMemHostGetDevicePointer() because the memory may be mapped into + * other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag. + * + * The memory allocated by this function must be freed with ::cuMemFreeHost(). + * + * Note all host memory allocated using ::cuMemHostAlloc() will automatically + * be immediately accessible to all contexts on all devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). + * Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer + * that may be used to access this host memory from those contexts is always equal + * to the returned host pointer \p *pp. If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED + * is specified, then the function ::cuMemHostGetDevicePointer() must be used + * to query the device pointer, even if the context supports unified addressing. + * See \ref CUDA_UNIFIED for additional details. 
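+ *
+ * For illustration, a minimal sketch using the portable and mapped flags
+ * (assuming an active context):
+ * \code
+   void *hostBuf = NULL;
+   unsigned int flags = CU_MEMHOSTALLOC_PORTABLE | CU_MEMHOSTALLOC_DEVICEMAP;
+   if (cuMemHostAlloc(&hostBuf, 1 << 20, flags) == CUDA_SUCCESS) {
+       // ... use the pinned buffer ...
+       cuMemFreeHost(hostBuf);
+   }
+ * \endcode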
+ * + * \param pp - Returned host pointer to page-locked memory + * \param bytesize - Requested allocation size in bytes + * \param Flags - Flags for allocation request + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaHostAlloc + */ +CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags); + +/** + * \brief Passes back device pointer of mapped pinned memory + * + * Passes back the device pointer \p pdptr corresponding to the mapped, pinned + * host buffer \p p allocated by ::cuMemHostAlloc. + * + * ::cuMemHostGetDevicePointer() will fail if the ::CU_MEMHOSTALLOC_DEVICEMAP + * flag was not specified at the time the memory was allocated, or if the + * function is called on a GPU that does not support mapped pinned memory. + * + * For devices that have a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory + * can also be accessed from the device using the host pointer \p p. + * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not + * match the original host pointer \p p and depends on the devices visible to the + * application. If all devices visible to the application have a non-zero value for the + * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() + * will match the original pointer \p p. If any device visible to the application + * has a zero value for the device attribute, the device pointer returned by + * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p, + * but it will be suitable for use on all devices provided Unified Virtual Addressing + * is enabled. In such systems, it is valid to access the memory using either pointer + * on devices that have a non-zero value for the device attribute. Note however that + * such devices should access the memory using only one of the two pointers and not both. + * + * \p Flags provides for future releases. For now, it must be set to 0. 
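+ *
+ * A minimal sketch (assuming \p p was allocated with ::cuMemHostAlloc and the
+ * ::CU_MEMHOSTALLOC_DEVICEMAP flag):
+ * \code
+   CUdeviceptr devPtr;
+   if (cuMemHostGetDevicePointer(&devPtr, p, 0) == CUDA_SUCCESS) {
+       // devPtr may now be used where a device pointer is expected
+   }
+ * \endcode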
+ * + * \param pdptr - Returned device pointer + * \param p - Host pointer + * \param Flags - Options (must be 0) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaHostGetDevicePointer + */ +CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags); + +/** + * \brief Passes back flags that were used for a pinned allocation + * + * Passes back the flags \p pFlags that were specified when allocating + * the pinned host buffer \p p allocated by ::cuMemHostAlloc. + * + * ::cuMemHostGetFlags() will fail if the pointer does not reside in + * an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc(). + * + * \param pFlags - Returned flags word + * \param p - Host pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuMemAllocHost, + * ::cuMemHostAlloc, + * ::cudaHostGetFlags + */ +CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p); + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p bytesize bytes of managed memory on the device and returns in + * \p *dptr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support + * for managed memory can be queried using the device attribute + * ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p bytesize + * is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer + * is valid on the CPU and on all GPUs in the system that support managed memory. + * All accesses to this pointer must obey the Unified Memory programming model. + * + * \p flags specifies the default stream association for this allocation. + * \p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If + * ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from + * any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the + * allocation should not be accessed from devices that have a zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to + * ::cuStreamAttachMemAsync will be required to enable access on such devices. + * + * If the association is later changed via ::cuStreamAttachMemAsync to + * a single stream, the default association as specified during ::cuMemAllocManaged + * is restored when that stream is destroyed. 
For __managed__ variables, the + * default association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a + * stream is an asynchronous operation, and as a result, the change to default + * association won't happen until all work in the stream has completed. + * + * Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree. + * + * Device memory oversubscription is possible for GPUs that have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on + * such GPUs may be evicted from device memory to host memory at any time by the Unified + * Memory driver in order to make room for other allocations. + * + * In a multi-GPU system where all GPUs have a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this + * API returns and instead may be populated on access. In such systems, managed memory can + * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + * maintain data locality and prevent excessive page faults to the extent possible. The application + * can also guide the driver about memory usage patterns via ::cuMemAdvise. The application + * can also explicitly migrate memory to a desired processor's memory via + * ::cuMemPrefetchAsync. + * + * In a multi-GPU system where all of the GPUs have a zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support + * with each other, the physical storage for managed memory is created on the GPU which is active + * at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced + * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + * memory among such GPUs. + * + * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + * where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + * is zero for at least one of those GPUs, the location chosen for physical storage of managed + * memory is system-dependent. + * - On Linux, the location chosen will be device memory as long as the current set of active + * contexts are on devices that either have peer-to-peer support with each other or have a + * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. 
+ * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a + * non-zero value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all contexts created in + * that process on devices that support managed memory have to be peer-to-peer compatible + * with each other. Context creation will fail if a context is created on a device that + * supports managed memory and is not peer-to-peer compatible with any of the other + * managed memory supporting devices on which contexts were previously created, even if + * those contexts have been destroyed. These environment variables are described + * in the CUDA programming guide under the "CUDA environment variables" section. + * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. + * + * \param dptr - Returned device pointer + * \param bytesize - Requested allocation size in bytes + * \param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync, + * ::cudaMallocManaged + */ +CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags); + +/** + * \brief Returns a handle to a compute device + * + * Returns in \p *device a device handle given a PCI bus ID string. + * + * \param dev - Returned device handle + * + * \param pciBusId - String in one of the following forms: + * [domain]:[bus]:[device].[function] + * [domain]:[bus]:[device] + * [bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGet, + * ::cuDeviceGetAttribute, + * ::cuDeviceGetPCIBusId, + * ::cudaDeviceGetByPCIBusId + */ +CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId); + +/** + * \brief Returns a PCI Bus Id string for the device + * + * Returns an ASCII string identifying the device \p dev in the NULL-terminated + * string pointed to by \p pciBusId. \p len specifies the maximum length of the + * string that may be returned. + * + * \param pciBusId - Returned identifier string for the device in the following format + * [domain]:[bus]:[device].[function] + * where \p domain, \p bus, \p device, and \p function are all hexadecimal values. + * pciBusId should be large enough to store 13 characters including the NULL-terminator. 
+ * + * \param len - Maximum length of string to store in \p name + * + * \param dev - Device to get identifier string for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuDeviceGet, + * ::cuDeviceGetAttribute, + * ::cuDeviceGetByPCIBusId, + * ::cudaDeviceGetPCIBusId + */ +CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev); + +/** + * \brief Gets an interprocess handle for a previously allocated event + * + * Takes as input a previously allocated event. This event must have been + * created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING + * flags set. This opaque handle may be copied into other processes and + * opened with ::cuIpcOpenEventHandle to allow efficient hardware + * synchronization between GPU work in different processes. + * + * After the event has been opened in the importing process, + * ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and + * ::cuEventQuery may be used in either process. Performing operations + * on the imported event after the exported event has been freed + * with ::cuEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pHandle - Pointer to a user allocated CUipcEventHandle + * in which to return the opaque event handle + * \param event - Event allocated with ::CU_EVENT_INTERPROCESS and + * ::CU_EVENT_DISABLE_TIMING flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuEventCreate, + * ::cuEventDestroy, + * ::cuEventSynchronize, + * ::cuEventQuery, + * ::cuStreamWaitEvent, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcGetEventHandle + */ +CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event); + +/** + * \brief Opens an interprocess event handle for use in the current process + * + * Opens an interprocess event handle exported from another process with + * ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like + * a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified. + * This event must be freed with ::cuEventDestroy. + * + * Performing operations on the imported event after the exported event has + * been freed with ::cuEventDestroy will result in undefined behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. 
+ * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param phEvent - Returns the imported event + * \param handle - Interprocess handle to open + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuEventCreate, + * ::cuEventDestroy, + * ::cuEventSynchronize, + * ::cuEventQuery, + * ::cuStreamWaitEvent, + * ::cuIpcGetEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcOpenEventHandle + */ +CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle); + +/** + * \brief Gets an interprocess memory handle for an existing device memory + * allocation + * + * Takes a pointer to the base of an existing device memory allocation created + * with ::cuMemAlloc and exports it for use in another process. This is a + * lightweight operation and may be called multiple times on an allocation + * without adverse effects. + * + * If a region of memory is freed with ::cuMemFree and a subsequent call + * to ::cuMemAlloc returns memory with the same device address, + * ::cuIpcGetMemHandle will return a unique handle for the + * new memory. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pHandle - Pointer to user allocated ::CUipcMemHandle to return + * the handle in. + * \param dptr - Base pointer to previously allocated device memory + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcOpenMemHandle, + * ::cuIpcCloseMemHandle, + * ::cudaIpcGetMemHandle + */ +CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr); + +/** + * \brief Opens an interprocess memory handle exported from another process + * and returns a device pointer usable in the local process. + * + * Maps memory exported from another process with ::cuIpcGetMemHandle into + * the current device address space. For contexts on different devices + * ::cuIpcOpenMemHandle can attempt to enable peer access between the + * devices as if the user called ::cuCtxEnablePeerAccess. This behavior is + * controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. + * ::cuDeviceCanAccessPeer can determine if a mapping is possible. + * + * Contexts that may open ::CUipcMemHandles are restricted in the following way. + * ::CUipcMemHandles from each ::CUdevice in a given process may only be opened + * by one ::CUcontext per ::CUdevice per other process. + * + * If the memory handle has already been opened by the current context, the + * reference count on the handle is incremented by 1 and the existing device pointer + * is returned. + * + * Memory returned from ::cuIpcOpenMemHandle must be freed with + * ::cuIpcCloseMemHandle. 
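+ *
+ * A minimal sketch of the two sides (the handle is assumed to be transported between
+ * the processes by some user-chosen IPC mechanism, e.g. a pipe or shared file):
+ * \code
+   // Exporting process
+   CUipcMemHandle handle;
+   cuIpcGetMemHandle(&handle, dptr);          // dptr was returned by cuMemAlloc
+
+   // Importing process (after receiving 'handle')
+   CUdeviceptr mapped;
+   cuIpcOpenMemHandle(&mapped, handle, CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS);
+   // ... use 'mapped' ...
+   cuIpcCloseMemHandle(mapped);
+ * \endcode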
+ * + * Calling ::cuMemFree on an exported memory region before calling + * ::cuIpcCloseMemHandle in the importing context will result in undefined + * behavior. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param pdptr - Returned device pointer + * \param handle - ::CUipcMemHandle to open + * \param Flags - Flags for this operation. Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_TOO_MANY_PEERS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \note No guarantees are made about the address returned in \p *pdptr. + * In particular, multiple processes may not receive the same address for the same \p handle. + * + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcCloseMemHandle, + * ::cuCtxEnablePeerAccess, + * ::cuDeviceCanAccessPeer, + * ::cudaIpcOpenMemHandle + */ +CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); + +/** + * \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle + * + * Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1. + * When the reference count reaches 0, this API unmaps the memory. The original allocation + * in the exporting process as well as imported mappings in other processes + * will be unaffected. + * + * Any resources used to enable peer access will be freed if this is the + * last mapping using them. + * + * IPC functionality is restricted to devices with support for unified + * addressing on Linux and Windows operating systems. + * IPC functionality on Windows is restricted to GPUs in TCC mode + * Users can test their device for IPC functionality by calling + * ::cuapiDeviceGetAttribute with ::CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED + * + * \param dptr - Device pointer returned by ::cuIpcOpenMemHandle + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_MAP_FAILED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \sa + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuIpcGetEventHandle, + * ::cuIpcOpenEventHandle, + * ::cuIpcGetMemHandle, + * ::cuIpcOpenMemHandle, + * ::cudaIpcCloseMemHandle + */ +CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr); + +/** + * \brief Registers an existing host memory range for use by CUDA + * + * Page-locks the memory range specified by \p p and \p bytesize and maps it + * for the device(s) as specified by \p Flags. This memory range also is added + * to the same tracking mechanism as ::cuMemHostAlloc to automatically accelerate + * calls to functions such as ::cuMemcpyHtoD(). Since the memory can be accessed + * directly by the device, it can be read or written with much higher bandwidth + * than pageable memory that has not been registered. Page-locking excessive + * amounts of memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to register staging areas for data exchange between + * host and device. 
+ *
+ * On systems where ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES
+ * is true, ::cuMemHostRegister will not page-lock the memory range specified
+ * by \p p but only populate unpopulated pages.
+ *
+ * The \p Flags parameter enables different options to be specified that
+ * affect the allocation, as follows.
+ *
+ * - ::CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be
+ *   considered as pinned memory by all CUDA contexts, not just the one that
+ *   performed the allocation.
+ *
+ * - ::CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address
+ *   space. The device pointer to the memory may be obtained by calling
+ *   ::cuMemHostGetDevicePointer().
+ *
+ * - ::CU_MEMHOSTREGISTER_IOMEMORY: The pointer is treated as pointing to some
+ *   I/O memory space, e.g. the PCI Express resource of a 3rd party device.
+ *
+ * - ::CU_MEMHOSTREGISTER_READ_ONLY: The pointer is treated as pointing to memory
+ *   that is considered read-only by the device. On platforms without
+ *   ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is
+ *   required in order to register memory mapped to the CPU as read-only. Support
+ *   for the use of this flag can be queried from the device attribute
+ *   ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with
+ *   a current context associated with a device that does not have this attribute
+ *   set will cause ::cuMemHostRegister to error with ::CUDA_ERROR_NOT_SUPPORTED.
+ *
+ * All of these flags are orthogonal to one another: a developer may page-lock
+ * memory that is portable or mapped with no restrictions.
+ *
+ * The ::CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for
+ * devices that do not support mapped pinned memory. The failure is deferred
+ * to ::cuMemHostGetDevicePointer() because the memory may be mapped into
+ * other CUDA contexts via the ::CU_MEMHOSTREGISTER_PORTABLE flag.
+ *
+ * For devices that have a non-zero value for the device attribute
+ * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory
+ * can also be accessed from the device using the host pointer \p p.
+ * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not
+ * match the original host pointer \p p and depends on the devices visible to the
+ * application. If all devices visible to the application have a non-zero value for the
+ * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer()
+ * will match the original pointer \p p. If any device visible to the application
+ * has a zero value for the device attribute, the device pointer returned by
+ * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p,
+ * but it will be suitable for use on all devices provided Unified Virtual Addressing
+ * is enabled. In such systems, it is valid to access the memory using either pointer
+ * on devices that have a non-zero value for the device attribute. Note however that
+ * such devices should access the memory using only one of the two pointers and not both.
+ *
+ * The memory page-locked by this function must be unregistered with
+ * ::cuMemHostUnregister().
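+ *
+ * A minimal sketch registering an existing malloc'd buffer (assuming an active context):
+ * \code
+   size_t size = 1 << 20;
+   void *buf = malloc(size);
+   if (cuMemHostRegister(buf, size, CU_MEMHOSTREGISTER_PORTABLE) == CUDA_SUCCESS) {
+       // copies to/from buf are now accelerated as with pinned memory
+       cuMemHostUnregister(buf);
+   }
+   free(buf);
+ * \endcode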
+ * + * \param p - Host pointer to memory to page-lock + * \param bytesize - Size in bytes of the address range to page-lock + * \param Flags - Flags for allocation request + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa + * ::cuMemHostUnregister, + * ::cuMemHostGetFlags, + * ::cuMemHostGetDevicePointer, + * ::cudaHostRegister + */ +CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); + +/** + * \brief Unregisters a memory range that was registered with cuMemHostRegister. + * + * Unmaps the memory range whose base address is specified by \p p, and makes + * it pageable again. + * + * The base address must be the same one specified to ::cuMemHostRegister(). + * + * \param p - Host pointer to memory to unregister + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED, + * \notefnerr + * + * \sa + * ::cuMemHostRegister, + * ::cudaHostUnregister + */ +CUresult CUDAAPI cuMemHostUnregister(void *p); + +/** + * \brief Copies memory + * + * Copies data between two pointers. + * \p dst and \p src are base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * Note that this function infers the type of the transfer (host to host, host to + * device, device to device, or device to host) from the pointer values. This + * function is only allowed in contexts which support unified addressing. + * + * \param dst - Destination unified virtual address space pointer + * \param src - Source unified virtual address space pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); + +/** + * \brief Copies device memory between two contexts + * + * Copies from device memory in one context to device memory in another + * context. \p dstDevice is the base device pointer of the destination memory + * and \p dstContext is the destination context. \p srcDevice is the base + * device pointer of the source memory and \p srcContext is the source pointer. + * \p ByteCount specifies the number of bytes to copy. 
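+ *
+ * A minimal sketch (assuming two contexts created on different devices, and
+ * hypothetical device allocations dstPtr and srcPtr of at least numBytes bytes
+ * made in those contexts):
+ * \code
+   // Copies numBytes bytes from srcContext's allocation into dstContext's allocation
+   cuMemcpyPeer(dstPtr, dstContext, srcPtr, srcContext, numBytes);
+ * \endcode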
+ * + * \param dstDevice - Destination device pointer + * \param dstContext - Destination context + * \param srcDevice - Source device pointer + * \param srcContext - Source context + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpyPeer + */ +CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); + +/** + * \brief Copies memory from Host to Device + * + * Copies from host memory to device memory. \p dstDevice and \p srcHost are + * the base addresses of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol + */ +CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); + +/** + * \brief Copies memory from Device to Host + * + * Copies from device to host memory. \p dstHost and \p srcDevice specify the + * base pointers of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. 
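+ *
+ * \par Example
+ * A minimal illustrative sketch of a device-to-host copy (the device
+ * allocation below is a placeholder assumed to hold valid data; error
+ * checking is omitted):
+ * \code
+   float h_data[256];
+   CUdeviceptr d_data;          // previously allocated and filled on the device
+   cuMemcpyDtoH(h_data, d_data, sizeof(h_data));
+ * \endcode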
+ * + * \param dstHost - Destination host pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Device to Device + * + * Copies from device memory to device memory. \p dstDevice and \p srcDevice + * are the base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy, + * ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol + */ +CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Device to Array + * + * Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting index of the destination data. + * \p srcDevice specifies the base pointer of the source. \p ByteCount + * specifies the number of bytes to copy. 
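+ *
+ * \par Example
+ * An illustrative sketch copying linear device memory into a 1D CUDA array
+ * (the array and source pointer below are placeholders created elsewhere;
+ * error checking is omitted):
+ * \code
+   CUarray arr;                 // 1D CUDA array from cuArrayCreate()
+   CUdeviceptr d_src;           // source device allocation
+   cuMemcpyDtoA(arr, 0, d_src, 4096);   // copy 4096 bytes to the start of the array
+ * \endcode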
+ * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyToArray + */ +CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); + +/** + * \brief Copies memory from Array to Device + * + * Copies from one 1D CUDA array to device memory. \p dstDevice specifies the + * base pointer of the destination and must be naturally aligned with the CUDA + * array elements. \p srcArray and \p srcOffset specify the CUDA array handle + * and the offset in bytes into the array where the copy is to begin. + * \p ByteCount specifies the number of bytes to copy and must be evenly + * divisible by the array element size. + * + * \param dstDevice - Destination device pointer + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyFromArray + */ +CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); + +/** + * \brief Copies memory from Host to Array + * + * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting offset in bytes of the destination + * data. \p pSrc specifies the base address of the source. \p ByteCount specifies + * the number of bytes to copy. 
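+ *
+ * \par Example
+ * An illustrative sketch copying host data into a freshly created 1D CUDA
+ * array (the sizes below are placeholders; error checking is omitted):
+ * \code
+   unsigned char h_src[1024];
+   CUDA_ARRAY_DESCRIPTOR desc = {0};
+   desc.Format      = CU_AD_FORMAT_UNSIGNED_INT8;
+   desc.NumChannels = 1;
+   desc.Width       = sizeof(h_src);
+   desc.Height      = 0;        // 0 height selects a 1D array
+   CUarray arr;
+   cuArrayCreate(&arr, &desc);
+   cuMemcpyHtoA(arr, 0, h_src, sizeof(h_src));
+ * \endcode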
+ *
+ * \param dstArray - Destination array
+ * \param dstOffset - Offset in bytes of destination array
+ * \param srcHost - Source host pointer
+ * \param ByteCount - Size of memory copy in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ * \note_sync
+ * \note_memcpy
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMemcpyToArray
+ */
+CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
+
+/**
+ * \brief Copies memory from Array to Host
+ *
+ * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base
+ * pointer of the destination. \p srcArray and \p srcOffset specify the CUDA
+ * array handle and starting offset in bytes of the source data.
+ * \p ByteCount specifies the number of bytes to copy.
+ *
+ * \param dstHost - Destination host pointer
+ * \param srcArray - Source array
+ * \param srcOffset - Offset in bytes of source array
+ * \param ByteCount - Size of memory copy in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ * \note_sync
+ * \note_memcpy
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMemcpyFromArray
+ */
+CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
+
+/**
+ * \brief Copies memory from Array to Array
+ *
+ * Copies from one 1D CUDA array to another. \p dstArray and \p srcArray
+ * specify the handles of the destination and source CUDA arrays for the copy,
+ * respectively. \p dstOffset and \p srcOffset specify the destination and
+ * source offsets in bytes into the CUDA arrays. \p ByteCount is the number of
+ * bytes to be copied. The elements of the CUDA arrays need not be of the
+ * same format, but they must be the same size, and \p ByteCount must be
+ * evenly divisible by that size.
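+ *
+ * \par Example
+ * An illustrative sketch copying between two 1D CUDA arrays with the same
+ * element size (creation of the arrays below is omitted, as is error
+ * checking):
+ * \code
+   CUarray srcArr, dstArr;      // both created with 1-byte elements
+   // Copy 512 bytes from offset 0 of srcArr to offset 128 of dstArr.
+   cuMemcpyAtoA(dstArr, 128, srcArr, 0, 512);
+ * \endcode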
+ * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpyArrayToArray + */ +CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + * ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. 
+ * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + * ignored. + * + * - ::srcXInBytes and ::srcY specify the base address of the source data for + * the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - ::dstXInBytes and ::dstY specify the base address of the destination data + * for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes and ::Height specify the width (in bytes) and height of + * the 2D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * + * \par + * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + * (device to device, CUDA array to device, CUDA array to CUDA array), + * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + * ::cuMemcpy2DUnaligned() does not have this restriction, but may run + * significantly slower in the cases where ::cuMemcpy2D() would have returned + * an error code. 
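+ *
+ * \par Example
+ * An illustrative sketch of a host-to-device 2D copy into a pitched
+ * allocation (the dimensions below are placeholders; error checking is
+ * omitted):
+ * \code
+   size_t width = 256, height = 64;    // width in bytes
+   unsigned char h_img[64][256];
+   CUdeviceptr d_img;
+   size_t pitch;
+   cuMemAllocPitch(&d_img, &pitch, width, height, 4);
+   CUDA_MEMCPY2D cp = {0};
+   cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+   cp.srcHost       = h_img;
+   cp.srcPitch      = width;
+   cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+   cp.dstDevice     = d_img;
+   cp.dstPitch      = pitch;
+   cp.WidthInBytes  = width;
+   cp.Height        = height;
+   cuMemcpy2D(&cp);
+ * \endcode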
+ * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DFromArray + */ +CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are + * ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. 
+ * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data and the bytes per + * row to apply. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are + * ignored. + * + * - ::srcXInBytes and ::srcY specify the base address of the source data for + * the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - ::dstXInBytes and ::dstY specify the base address of the destination data + * for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes and ::Height specify the width (in bytes) and height of + * the 2D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * + * \par + * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back + * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies + * (device to device, CUDA array to device, CUDA array to CUDA array), + * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). + * ::cuMemcpy2DUnaligned() does not have this restriction, but may run + * significantly slower in the cases where ::cuMemcpy2D() would have returned + * an error code. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, + * ::cudaMemcpy2DFromArray + */ +CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy); + +/** + * \brief Copies memory for 3D arrays + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. 
The ::CUDA_MEMCPY3D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY3D_st { + + unsigned int srcXInBytes, srcY, srcZ; + unsigned int srcLOD; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; // ignored when src is array + unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + + unsigned int dstXInBytes, dstY, dstZ; + unsigned int dstLOD; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; // ignored when dst is array + unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + + unsigned int WidthInBytes; + unsigned int Height; + unsigned int Depth; + } CUDA_MEMCPY3D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + * ::srcHeight specify the (host) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + * ::srcHeight specify the (device) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and + * ::srcHeight are ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data, the bytes per row, + * and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data, the bytes per + * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + * ::dstHeight are ignored. + * + * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + * data for the copy. 
+ * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - dstXInBytes, ::dstY and ::dstZ specify the base address of the + * destination data for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + * and depth of the 3D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcHeight must be greater than or equal to ::Height + + * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + * + * \par + * ::cuMemcpy3D() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + * + * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + * set to 0. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMemcpy3D + */ +CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy); + +/** + * \brief Copies memory between contexts + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + * for documentation of its parameters. + * + * \param pCopy - Parameters for the memory copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_sync + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpy3DPeer + */ +CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); + +/** + * \brief Copies memory asynchronously + * + * Copies data between two pointers. 
+ * \p dst and \p src are base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. + * Note that this function infers the type of the transfer (host to host, host to + * device, device to device, or device to host) from the pointer values. This + * function is only allowed in contexts which support unified addressing. + * + * \param dst - Destination unified virtual address space pointer + * \param src - Source unified virtual address space pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies device memory between two contexts asynchronously. + * + * Copies from device memory in one context to device memory in another + * context. \p dstDevice is the base device pointer of the destination memory + * and \p dstContext is the destination context. \p srcDevice is the base + * device pointer of the source memory and \p srcContext is the source pointer. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstDevice - Destination device pointer + * \param dstContext - Destination context + * \param srcDevice - Source device pointer + * \param srcContext - Source context + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpyPeerAsync + */ +CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Host to Device + * + * Copies from host memory to device memory. \p dstDevice and \p srcHost are + * the base addresses of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. 
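+ *
+ * \par Example
+ * An illustrative sketch of an asynchronous host-to-device copy using a
+ * page-locked host buffer so the transfer can overlap with other work (the
+ * names and sizes below are placeholders; error checking is omitted):
+ * \code
+   float *h_buf;
+   size_t bytes = 1 << 20;
+   cuMemAllocHost((void**)&h_buf, bytes);   // pinned host memory
+   CUdeviceptr d_buf;
+   cuMemAlloc(&d_buf, bytes);
+   CUstream stream;
+   cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
+   cuMemcpyHtoDAsync(d_buf, h_buf, bytes, stream);
+   // ... enqueue kernels on the same stream ...
+   cuStreamSynchronize(stream);
+ * \endcode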
+ * + * \param dstDevice - Destination device pointer + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync + */ +CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Device to Host + * + * Copies from device to host memory. \p dstHost and \p srcDevice specify the + * base pointers of the destination and source, respectively. \p ByteCount + * specifies the number of bytes to copy. + * + * \param dstHost - Destination host pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Device to Device + * + * Copies from device memory to device memory. \p dstDevice and \p srcDevice + * are the base pointers of the destination and source, respectively. + * \p ByteCount specifies the number of bytes to copy. 
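+ *
+ * \par Example
+ * An illustrative sketch of a device-to-device copy enqueued on a stream
+ * (the allocations and stream below are placeholders created elsewhere;
+ * error checking is omitted):
+ * \code
+   CUdeviceptr d_src, d_dst;    // device allocations large enough for the copy
+   CUstream stream;             // previously created stream
+   cuMemcpyDtoDAsync(d_dst, d_src, 1 << 20, stream);
+ * \endcode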
+ * + * \param dstDevice - Destination device pointer + * \param srcDevice - Source device pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyAsync, + * ::cudaMemcpyToSymbolAsync, + * ::cudaMemcpyFromSymbolAsync + */ +CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Host to Array + * + * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset + * specify the CUDA array handle and starting offset in bytes of the + * destination data. \p srcHost specifies the base address of the source. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstArray - Destination array + * \param dstOffset - Offset in bytes of destination array + * \param srcHost - Source host pointer + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyToArrayAsync + */ +CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory from Array to Host + * + * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base + * pointer of the destination. 
\p srcArray and \p srcOffset specify the CUDA + * array handle and starting offset in bytes of the source data. + * \p ByteCount specifies the number of bytes to copy. + * + * \param dstHost - Destination pointer + * \param srcArray - Source array + * \param srcOffset - Offset in bytes of source array + * \param ByteCount - Size of memory copy in bytes + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * \note_memcpy + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpyFromArrayAsync + */ +CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); + +/** + * \brief Copies memory for 2D arrays + * + * Perform a 2D memory copy according to the parameters specified in \p pCopy. + * The ::CUDA_MEMCPY2D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY2D_st { + unsigned int srcXInBytes, srcY; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; + unsigned int dstXInBytes, dstY; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; + unsigned int WidthInBytes; + unsigned int Height; + } CUDA_MEMCPY2D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch + * specify the (host) base address of the source data and the bytes per row to + * apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch + * specify the (device) base address of the source data and the bytes per row + * to apply. ::srcArray is ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. 
::srcHost, ::srcDevice and ::srcPitch are
+ * ignored.
+ *
+ * \par
+ * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch
+ * specify the (unified virtual address space) base address of the source data
+ * and the bytes per row to apply. ::dstArray is ignored.
+ * This value may be used only if unified addressing is supported in the calling
+ * context.
+ *
+ * \par
+ * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch
+ * specify the (host) base address of the destination data and the bytes per
+ * row to apply. ::dstArray is ignored.
+ *
+ * \par
+ * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch
+ * specify the (device) base address of the destination data and the bytes per
+ * row to apply. ::dstArray is ignored.
+ *
+ * \par
+ * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the
+ * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are
+ * ignored.
+ *
+ * - ::srcXInBytes and ::srcY specify the base address of the source data for
+ * the copy.
+ *
+ * \par
+ * For host pointers, the starting address is
+ * \code
+ void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes);
+ * \endcode
+ *
+ * \par
+ * For device pointers, the starting address is
+ * \code
+ CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes;
+ * \endcode
+ *
+ * \par
+ * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array
+ * element size.
+ *
+ * - ::dstXInBytes and ::dstY specify the base address of the destination data
+ * for the copy.
+ *
+ * \par
+ * For host pointers, the base address is
+ * \code
+ void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes);
+ * \endcode
+ *
+ * \par
+ * For device pointers, the starting address is
+ * \code
+ CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes;
+ * \endcode
+ *
+ * \par
+ * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array
+ * element size.
+ *
+ * - ::WidthInBytes and ::Height specify the width (in bytes) and height of
+ * the 2D copy being performed.
+ * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes +
+ * ::srcXInBytes, and ::dstPitch must be greater than or equal to
+ * ::WidthInBytes + dstXInBytes.
+ * - If specified, ::srcHeight must be greater than or equal to ::Height +
+ * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY.
+ *
+ * \par
+ * ::cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum
+ * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back
+ * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies
+ * (device to device, CUDA array to device, CUDA array to CUDA array),
+ * ::cuMemcpy2DAsync() may fail for pitches not computed by ::cuMemAllocPitch().
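+ *
+ * \par Example
+ * An illustrative sketch of an asynchronous 2D copy between two pitched
+ * device allocations (the allocations, pitches and stream below are
+ * placeholders created elsewhere; error checking is omitted):
+ * \code
+   CUdeviceptr d_src, d_dst;        // pitched allocations from cuMemAllocPitch()
+   size_t srcPitch, dstPitch;       // pitches returned by cuMemAllocPitch()
+   size_t widthInBytes = 256, height = 64;
+   CUstream stream;                 // previously created stream
+   CUDA_MEMCPY2D cp = {0};
+   cp.srcMemoryType = CU_MEMORYTYPE_DEVICE;
+   cp.srcDevice     = d_src;
+   cp.srcPitch      = srcPitch;
+   cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+   cp.dstDevice     = d_dst;
+   cp.dstPitch      = dstPitch;
+   cp.WidthInBytes  = widthInBytes;
+   cp.Height        = height;
+   cuMemcpy2DAsync(&cp, stream);
+ * \endcode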
+ * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync + */ +CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream); + +/** + * \brief Copies memory for 3D arrays + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. The ::CUDA_MEMCPY3D structure is defined as: + * + * \code + typedef struct CUDA_MEMCPY3D_st { + + unsigned int srcXInBytes, srcY, srcZ; + unsigned int srcLOD; + CUmemorytype srcMemoryType; + const void *srcHost; + CUdeviceptr srcDevice; + CUarray srcArray; + unsigned int srcPitch; // ignored when src is array + unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 + + unsigned int dstXInBytes, dstY, dstZ; + unsigned int dstLOD; + CUmemorytype dstMemoryType; + void *dstHost; + CUdeviceptr dstDevice; + CUarray dstArray; + unsigned int dstPitch; // ignored when dst is array + unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 + + unsigned int WidthInBytes; + unsigned int Height; + unsigned int Depth; + } CUDA_MEMCPY3D; + * \endcode + * where: + * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the + * source and destination, respectively; ::CUmemorytype_enum is defined as: + * + * \code + typedef enum CUmemorytype_enum { + CU_MEMORYTYPE_HOST = 0x01, + CU_MEMORYTYPE_DEVICE = 0x02, + CU_MEMORYTYPE_ARRAY = 0x03, + CU_MEMORYTYPE_UNIFIED = 0x04 + } CUmemorytype; + * \endcode + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::srcArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and + * ::srcHeight specify the (host) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. + * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and + * ::srcHeight specify the (device) base address of the source data, the bytes + * per row, and the height of each 2D slice of the 3D array. ::srcArray is + * ignored. 
+ * + * \par + * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the + * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and + * ::srcHeight are ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch + * specify the (unified virtual address space) base address of the source data + * and the bytes per row to apply. ::dstArray is ignored. + * This value may be used only if unified addressing is supported in the calling + * context. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch + * specify the (host) base address of the destination data, the bytes per row, + * and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch + * specify the (device) base address of the destination data, the bytes per + * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. + * + * \par + * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the + * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and + * ::dstHeight are ignored. + * + * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source + * data for the copy. + * + * \par + * For host pointers, the starting address is + * \code + void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array + * element size. + * + * - dstXInBytes, ::dstY and ::dstZ specify the base address of the + * destination data for the copy. + * + * \par + * For host pointers, the base address is + * \code + void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); + * \endcode + * + * \par + * For device pointers, the starting address is + * \code + CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; + * \endcode + * + * \par + * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array + * element size. + * + * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height + * and depth of the 3D copy being performed. + * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + + * ::srcXInBytes, and ::dstPitch must be greater than or equal to + * ::WidthInBytes + dstXInBytes. + * - If specified, ::srcHeight must be greater than or equal to ::Height + + * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. + * + * \par + * ::cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum + * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). + * + * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be + * set to 0. 
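+ *
+ * \par Example
+ * An illustrative sketch of an asynchronous 3D copy from host memory to a
+ * pitched device volume (all names and sizes below are placeholders; error
+ * checking is omitted):
+ * \code
+   void *h_volume;                  // page-locked host volume
+   CUdeviceptr d_volume;            // pitched device allocation
+   size_t widthInBytes = 256, height = 64, depth = 32, devPitch = 512;
+   CUstream stream;                 // previously created stream
+   CUDA_MEMCPY3D cp = {0};          // zero-initializing also sets srcLOD/dstLOD to 0
+   cp.srcMemoryType = CU_MEMORYTYPE_HOST;
+   cp.srcHost       = h_volume;
+   cp.srcPitch      = widthInBytes;
+   cp.srcHeight     = height;
+   cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+   cp.dstDevice     = d_volume;
+   cp.dstPitch      = devPitch;
+   cp.dstHeight     = height;
+   cp.WidthInBytes  = widthInBytes;
+   cp.Height        = height;
+   cp.Depth         = depth;
+   cuMemcpy3DAsync(&cp, stream);
+ * \endcode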
+ * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemcpy3DAsync + */ +CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream); + +/** + * \brief Copies memory between contexts asynchronously. + * + * Perform a 3D memory copy according to the parameters specified in + * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure + * for documentation of its parameters. + * + * \param pCopy - Parameters for the memory copy + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, + * ::cuMemcpy3DPeerAsync, + * ::cudaMemcpy3DPeerAsync + */ +CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 8-bit values to the specified value + * \p uc. 
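+ *
+ * \par Example
+ * A short illustrative sketch zero-filling a device allocation byte by byte
+ * (the size below is a placeholder; error checking is omitted):
+ * \code
+   CUdeviceptr d_buf;
+   size_t bytes = 4096;
+   cuMemAlloc(&d_buf, bytes);
+   cuMemsetD8(d_buf, 0, bytes);     // set every byte to zero
+ * \endcode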
+ * + * \param dstDevice - Destination device pointer + * \param uc - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 16-bit values to the specified value + * \p us. The \p dstDevice pointer must be two byte aligned. + * + * \param dstDevice - Destination device pointer + * \param us - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the memory range of \p N 32-bit values to the specified value + * \p ui. The \p dstDevice pointer must be four byte aligned. 
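+ *
+ * \par
+ * Illustrative sketch (not part of the original description): \p devPtr is a
+ * placeholder for a 4-byte-aligned device allocation and \p numWords for the
+ * number of 32-bit values (not bytes) to write:
+ * \code
+     cuMemsetD32(devPtr, 0xDEADBEEF, numWords);
+ * \endcode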
+ * + * \param dstDevice - Destination device pointer + * \param ui - Value to set + * \param N - Number of elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32Async, + * ::cudaMemset + */ +CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 8-bit values to the specified value + * \p uc. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param uc - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 16-bit values to the specified value + * \p us. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be two byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). 
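+ *
+ * \par
+ * Illustrative sketch (not part of the original description) pairing this
+ * function with a pitched allocation; \p widthElems and \p heightRows are
+ * placeholder extents, and the element-size argument of 4 passed to
+ * ::cuMemAllocPitch is just one legal choice:
+ * \code
+     CUdeviceptr dptr;
+     size_t pitch;   // pitch in bytes, chosen by the driver
+     cuMemAllocPitch(&dptr, &pitch, widthElems * sizeof(unsigned short), heightRows, 4);
+
+     // Width counts 16-bit elements per row; dstPitch is in bytes.
+     cuMemsetD2D16(dptr, pitch, 0xABCD, widthElems, heightRows);
+ * \endcode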
+ * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param us - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); + +/** + * \brief Initializes device memory + * + * Sets the 2D memory range of \p Width 32-bit values to the specified value + * \p ui. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be four byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param ui - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2D + */ +CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 8-bit values to the specified value + * \p uc. 
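+ *
+ * \par
+ * Illustrative sketch (not part of the original description): \p devPtr,
+ * \p numBytes and \p stream are placeholders; the fill is merely enqueued, so
+ * the host must rely on stream ordering or a later synchronization before
+ * reading the result:
+ * \code
+     cuMemsetD8Async(devPtr, 0, numBytes, stream);
+     // ... enqueue kernels or copies that consume the cleared buffer ...
+     cuStreamSynchronize(stream);
+ * \endcode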
+ * + * \param dstDevice - Destination device pointer + * \param uc - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 16-bit values to the specified value + * \p us. The \p dstDevice pointer must be two byte aligned. + * + * \param dstDevice - Destination device pointer + * \param us - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the memory range of \p N 32-bit values to the specified value + * \p ui. The \p dstDevice pointer must be four byte aligned. 
+ * + * \param dstDevice - Destination device pointer + * \param ui - Value to set + * \param N - Number of elements + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32, + * ::cudaMemsetAsync + */ +CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 8-bit values to the specified value + * \p uc. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param uc - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 16-bit values to the specified value + * \p us. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be two byte aligned. 
This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). + * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param us - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Sets device memory + * + * Sets the 2D memory range of \p Width 32-bit values to the specified value + * \p ui. \p Height specifies the number of rows to set, and \p dstPitch + * specifies the number of bytes between each row. The \p dstDevice pointer + * and \p dstPitch offset must be four byte aligned. This function performs + * fastest when the pitch is one that has been passed back by + * ::cuMemAllocPitch(). 
+ * + * \param dstDevice - Destination device pointer + * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) + * \param ui - Value to set + * \param Width - Width of row + * \param Height - Number of rows + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * \note_memset + * \note_null_stream + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, + * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, + * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, + * ::cuMemsetD32, ::cuMemsetD32Async, + * ::cudaMemset2DAsync + */ +CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); + +/** + * \brief Creates a 1D or 2D CUDA array + * + * Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure + * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. + * The ::CUDA_ARRAY_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + CUarray_format Format; + unsigned int NumChannels; + } CUDA_ARRAY_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, and \p Height are the width, and height of the CUDA array (in + * elements); the CUDA array is one-dimensional if height is 0, two-dimensional + * otherwise; + * - ::Format specifies the format of the elements; ::CUarray_format is + * defined as: + * \code + typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, + CU_AD_FORMAT_SIGNED_INT8 = 0x08, + CU_AD_FORMAT_SIGNED_INT16 = 0x09, + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, + CU_AD_FORMAT_HALF = 0x10, + CU_AD_FORMAT_FLOAT = 0x20 + } CUarray_format; + * \endcode + * - \p NumChannels specifies the number of packed components per CUDA array + * element; it may be 1, 2, or 4; + * + * Here are examples of CUDA array descriptions: + * + * Description for a CUDA array of 2048 floats: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 2048; + desc.Height = 1; + * \endcode + * + * Description for a 64 x 64 CUDA array of floats: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 64; + desc.Height = 64; + * \endcode + * + * Description for a \p width x \p height CUDA array of 64-bit, 4x16-bit + * float16's: + * \code + CUDA_ARRAY_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_HALF; + desc.NumChannels = 4; + desc.Width = width; + desc.Height = height; + * \endcode + * + * Description for a \p width x \p height CUDA array of 16-bit elements, each + * of which is two 8-bit unsigned chars: + * \code + 
CUDA_ARRAY_DESCRIPTOR desc;
+   desc.Format = CU_AD_FORMAT_UNSIGNED_INT8;
+   desc.NumChannels = 2;
+   desc.Width = width;
+   desc.Height = height;
+ * \endcode
+ *
+ * \param pHandle - Returned array
+ * \param pAllocateArray - Array descriptor
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_OUT_OF_MEMORY,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor,
+ * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaMallocArray
+ */
+CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray);
+
+/**
+ * \brief Get a 1D or 2D CUDA array descriptor
+ *
+ * Returns in \p *pArrayDescriptor a descriptor containing information on the
+ * format and dimensions of the CUDA array \p hArray. It is useful for
+ * subroutines that have been passed a CUDA array, but need to know the CUDA
+ * array parameters for validation or other purposes.
+ *
+ * \param pArrayDescriptor - Returned array descriptor
+ * \param hArray - Array to get descriptor of
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE
+ * \notefnerr
+ *
+ * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,
+ * ::cuArrayDestroy, ::cuMemAlloc, ::cuMemAllocHost,
+ * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,
+ * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,
+ * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,
+ * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,
+ * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,
+ * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,
+ * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,
+ * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,
+ * ::cudaArrayGetInfo
+ */
+CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor, CUarray hArray);
+
+/**
+ * \brief Returns the layout properties of a sparse CUDA array
+ *
+ * Returns the layout properties of a sparse CUDA array in \p sparseProperties.
+ * If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_SPARSE,
+ * ::CUDA_ERROR_INVALID_VALUE will be returned.
+ *
+ * If the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL,
+ * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize represents the total size of the array. Otherwise, it will be zero.
+ * Also, the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is always zero.
+ * Note that the \p array must have been allocated using ::cuArrayCreate or ::cuArray3DCreate. For CUDA arrays obtained
+ * using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. Instead, ::cuMipmappedArrayGetSparseProperties
+ * must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs.
+ *
+ * \return
+ * ::CUDA_SUCCESS
+ * ::CUDA_ERROR_INVALID_VALUE
+ *
+ * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES
+ * \param[in] array - CUDA array to get the sparse properties of
+ * \sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync
+ */
+CUresult CUDAAPI cuArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUarray array);
+
+/**
+ * \brief Returns the layout properties of a sparse CUDA mipmapped array
+ *
+ * Returns the sparse array layout properties in \p sparseProperties.
+ * If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_SPARSE,
+ * ::CUDA_ERROR_INVALID_VALUE will be returned.
+ *
+ * For non-layered CUDA mipmapped arrays, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize returns the
+ * size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth
+ * is less than that of the tile.
+ * For layered CUDA mipmapped arrays, if ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL,
+ * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies the size of the mip tail of all layers combined.
+ * Otherwise, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies the mip tail size per layer.
+ * The returned value of ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is valid only if ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize is non-zero.
+ *
+ * \return
+ * ::CUDA_SUCCESS
+ * ::CUDA_ERROR_INVALID_VALUE
+ *
+ * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES
+ * \param[in] mipmap - CUDA mipmapped array to get the sparse properties of
+ * \sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync
+ */
+CUresult CUDAAPI cuMipmappedArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUmipmappedArray mipmap);
+
+/**
+ * \brief Returns the memory requirements of a CUDA array
+ *
+ * Returns the memory requirements of a CUDA array in \p memoryRequirements.
+ * If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING,
+ * ::CUDA_ERROR_INVALID_VALUE will be returned.
+ *
+ * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size
+ * represents the total size of the CUDA array.
+ * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment
+ * represents the alignment necessary for mapping the CUDA array.
+ *
+ * \return
+ * ::CUDA_SUCCESS
+ * ::CUDA_ERROR_INVALID_VALUE
+ *
+ * \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS
+ * \param[in] array - CUDA array to get the memory requirements of
+ * \param[in] device - Device to get the memory requirements for
+ * \sa ::cuMipmappedArrayGetMemoryRequirements, ::cuMemMapArrayAsync
+ */
+CUresult CUDAAPI cuArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements, CUarray array, CUdevice device);
+
+/**
+ * \brief Returns the memory requirements of a CUDA mipmapped array
+ *
+ * Returns the memory requirements of a CUDA mipmapped array in \p memoryRequirements.
+ * If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_DEFERRED_MAPPING,
+ * ::CUDA_ERROR_INVALID_VALUE will be returned.
+ * + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::size + * represents the total size of the CUDA mipmapped array. + * The returned value in ::CUDA_ARRAY_MEMORY_REQUIREMENTS::alignment + * represents the alignment necessary for mapping the CUDA mipmapped + * array. + * + * \return + * ::CUDA_SUCCESS + * ::CUDA_ERROR_INVALID_VALUE + * + * \param[out] memoryRequirements - Pointer to ::CUDA_ARRAY_MEMORY_REQUIREMENTS + * \param[in] mipmap - CUDA mipmapped array to get the memory requirements of + * \param[in] device - Device to get the memory requirements for + * \sa ::cuArrayGetMemoryRequirements, ::cuMemMapArrayAsync + */ +CUresult CUDAAPI cuMipmappedArrayGetMemoryRequirements(CUDA_ARRAY_MEMORY_REQUIREMENTS *memoryRequirements, CUmipmappedArray mipmap, CUdevice device); + +/** + * \brief Gets a CUDA array plane from a CUDA array + * + * Returns in \p pPlaneArray a CUDA array that represents a single format plane + * of the CUDA array \p hArray. + * + * If \p planeIdx is greater than the maximum number of planes in this array or if the array does + * not have a multi-planar format e.g: ::CU_AD_FORMAT_NV12, then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Note that if the \p hArray has format ::CU_AD_FORMAT_NV12, then passing in 0 for \p planeIdx returns + * a CUDA array of the same size as \p hArray but with one channel and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + * If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width + * of \p hArray with two channels and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. + * + * \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx + * \param hArray - Multiplanar CUDA array + * \param planeIdx - Plane index + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::cuArrayCreate, + * ::cudaArrayGetPlane + */ +CUresult CUDAAPI cuArrayGetPlane(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx); + +/** + * \brief Destroys a CUDA array + * + * Destroys the CUDA array \p hArray. + * + * \param hArray - Array to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ARRAY_IS_MAPPED, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaFreeArray + */ +CUresult CUDAAPI cuArrayDestroy(CUarray hArray); + +/** + * \brief Creates a 3D CUDA array + * + * Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. 
+ * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + unsigned int Depth; + CUarray_format Format; + unsigned int NumChannels; + unsigned int Flags; + } CUDA_ARRAY3D_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, \p Height, and \p Depth are the width, height, and depth of the + * CUDA array (in elements); the following types of CUDA arrays can be allocated: + * - A 1D array is allocated if \p Height and \p Depth extents are both zero. + * - A 2D array is allocated if only \p Depth extent is zero. + * - A 3D array is allocated if all three extents are non-zero. + * - A 1D layered CUDA array is allocated if only \p Height is zero and the + * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + * of layers is determined by the depth extent. + * - A 2D layered CUDA array is allocated if all three extents are non-zero and + * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + * of layers is determined by the depth extent. + * - A cubemap CUDA array is allocated if all three extents are non-zero and the + * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + * where the six layers represent the six faces of a cube. The order of the six + * layers in memory is the same as that listed in ::CUarray_cubemap_face. + * - A cubemap layered CUDA array is allocated if all three extents are non-zero, + * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + * A cubemap layered CUDA array is a special type of 2D layered CUDA array that + * consists of a collection of cubemaps. The first six layers represent the first + * cubemap, the next six layers form the second cubemap, and so on. + * + * - ::Format specifies the format of the elements; ::CUarray_format is + * defined as: + * \code + typedef enum CUarray_format_enum { + CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, + CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, + CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, + CU_AD_FORMAT_SIGNED_INT8 = 0x08, + CU_AD_FORMAT_SIGNED_INT16 = 0x09, + CU_AD_FORMAT_SIGNED_INT32 = 0x0a, + CU_AD_FORMAT_HALF = 0x10, + CU_AD_FORMAT_FLOAT = 0x20 + } CUarray_format; + * \endcode + * + * - \p NumChannels specifies the number of packed components per CUDA array + * element; it may be 1, 2, or 4; + * + * - ::Flags may be set to + * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set, + * \p Depth specifies the number of layers, not the depth of a 3D array. + * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array. + * If this flag is not set, ::cuSurfRefSetArray will fail when attempting to bind the CUDA array + * to a surface reference. + * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, \p Width must be + * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, + * then \p Depth must be a multiple of six. + * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather. + * Texture gather can only be performed on 2D CUDA arrays. + * + * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. + * All values are specified in elements. 
Note that for brevity's sake, the full name of the device attribute
+ * is not specified. For ex., TEXTURE1D_WIDTH refers to the device attribute
+ * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH.
+ *
+ * Note that 2D CUDA arrays have different size requirements if the ::CUDA_ARRAY3D_TEXTURE_GATHER flag
+ * is set. \p Width and \p Height must not be greater than ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
+ * and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively, in that case.
+ *
+ * <table>
+ * <tr><td><b>CUDA array type</b></td>
+ * <td><b>Valid extents that must always be met<br>{(width range in elements),
+ * (height range), (depth range)}</b></td>
+ * <td><b>Valid extents with CUDA_ARRAY3D_SURFACE_LDST set<br>
+ * {(width range in elements), (height range), (depth range)}</b></td></tr>
+ * <tr><td>1D</td>
+ * <td>{ (1,TEXTURE1D_WIDTH), 0, 0 }</td>
+ * <td>{ (1,SURFACE1D_WIDTH), 0, 0 }</td></tr>
+ * <tr><td>2D</td>
+ * <td>{ (1,TEXTURE2D_WIDTH), (1,TEXTURE2D_HEIGHT), 0 }</td>
+ * <td>{ (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }</td></tr>
+ * <tr><td>3D</td>
+ * <td>{ (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
+ * <br>OR<br>
+ * { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE),
+ * (1,TEXTURE3D_DEPTH_ALTERNATE) }</td>
+ * <td>{ (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
+ * (1,SURFACE3D_DEPTH) }</td></tr>
+ * <tr><td>1D Layered</td>
+ * <td>{ (1,TEXTURE1D_LAYERED_WIDTH), 0,
+ * (1,TEXTURE1D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE1D_LAYERED_WIDTH), 0,
+ * (1,SURFACE1D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>2D Layered</td>
+ * <td>{ (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT),
+ * (1,TEXTURE2D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT),
+ * (1,SURFACE2D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>Cubemap</td>
+ * <td>{ (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }</td>
+ * <td>{ (1,SURFACECUBEMAP_WIDTH),
+ * (1,SURFACECUBEMAP_WIDTH), 6 }</td></tr>
+ * <tr><td>Cubemap Layered</td>
+ * <td>{ (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH),
+ * (1,TEXTURECUBEMAP_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH),
+ * (1,SURFACECUBEMAP_LAYERED_LAYERS) }</td></tr>
+ * </table>
+ * + * Here are examples of CUDA array descriptions: + * + * Description for a CUDA array of 2048 floats: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 2048; + desc.Height = 0; + desc.Depth = 0; + * \endcode + * + * Description for a 64 x 64 CUDA array of floats: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_FLOAT; + desc.NumChannels = 1; + desc.Width = 64; + desc.Height = 64; + desc.Depth = 0; + * \endcode + * + * Description for a \p width x \p height x \p depth CUDA array of 64-bit, + * 4x16-bit float16's: + * \code + CUDA_ARRAY3D_DESCRIPTOR desc; + desc.Format = CU_AD_FORMAT_HALF; + desc.NumChannels = 4; + desc.Width = width; + desc.Height = height; + desc.Depth = depth; + * \endcode + * + * \param pHandle - Returned array + * \param pAllocateArray - 3D array descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuArray3DGetDescriptor, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaMalloc3DArray + */ +CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray); + +/** + * \brief Get a 3D CUDA array descriptor + * + * Returns in \p *pArrayDescriptor a descriptor containing information on the + * format and dimensions of the CUDA array \p hArray. It is useful for + * subroutines that have been passed a CUDA array, but need to know the CUDA + * array parameters for validation or other purposes. + * + * This function may be called on 1D and 2D arrays, in which case the \p Height + * and/or \p Depth members of the descriptor struct will be set to 0. 
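+ *
+ * \par
+ * Illustrative sketch (not part of the original description): \p hArray is a
+ * placeholder for an array created earlier with ::cuArrayCreate or
+ * ::cuArray3DCreate:
+ * \code
+     CUDA_ARRAY3D_DESCRIPTOR desc;
+     cuArray3DGetDescriptor(&desc, hArray);
+     // Height and/or Depth are reported as 0 for 1D and 2D arrays.
+     size_t elems = desc.Width * (desc.Height ? desc.Height : 1)
+                               * (desc.Depth  ? desc.Depth  : 1);
+ * \endcode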
+ * + * \param pArrayDescriptor - Returned 3D array descriptor + * \param hArray - 3D array to get descriptor of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa ::cuArray3DCreate, ::cuArrayCreate, + * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, + * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, + * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, + * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, + * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, + * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, + * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, + * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, + * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, + * ::cudaArrayGetInfo + */ +CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor, CUarray hArray); + +/** + * \brief Creates a CUDA mipmapped array + * + * Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure + * \p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \p *pHandle. + * \p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is + * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. + * + * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: + * + * \code + typedef struct { + unsigned int Width; + unsigned int Height; + unsigned int Depth; + CUarray_format Format; + unsigned int NumChannels; + unsigned int Flags; + } CUDA_ARRAY3D_DESCRIPTOR; + * \endcode + * where: + * + * - \p Width, \p Height, and \p Depth are the width, height, and depth of the + * CUDA array (in elements); the following types of CUDA arrays can be allocated: + * - A 1D mipmapped array is allocated if \p Height and \p Depth extents are both zero. + * - A 2D mipmapped array is allocated if only \p Depth extent is zero. + * - A 3D mipmapped array is allocated if all three extents are non-zero. + * - A 1D layered CUDA mipmapped array is allocated if only \p Height is zero and the + * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number + * of layers is determined by the depth extent. + * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and + * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number + * of layers is determined by the depth extent. + * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the + * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and + * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, + * where the six layers represent the six faces of a cube. The order of the six + * layers in memory is the same as that listed in ::CUarray_cubemap_face. + * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, + * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. + * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. + * A cubemap layered CUDA array is a special type of 2D layered CUDA array that + * consists of a collection of cubemaps. 
The first six layers represent the first
+ * cubemap, the next six layers form the second cubemap, and so on.
+ *
+ * - ::Format specifies the format of the elements; ::CUarray_format is
+ * defined as:
+ * \code
+   typedef enum CUarray_format_enum {
+       CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,
+       CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,
+       CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,
+       CU_AD_FORMAT_SIGNED_INT8 = 0x08,
+       CU_AD_FORMAT_SIGNED_INT16 = 0x09,
+       CU_AD_FORMAT_SIGNED_INT32 = 0x0a,
+       CU_AD_FORMAT_HALF = 0x10,
+       CU_AD_FORMAT_FLOAT = 0x20
+   } CUarray_format;
+ * \endcode
+ *
+ * - \p NumChannels specifies the number of packed components per CUDA array
+ * element; it may be 1, 2, or 4;
+ *
+ * - ::Flags may be set to
+ * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set,
+ * \p Depth specifies the number of layers, not the depth of a 3D array.
+ * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of
+ * the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to
+ * bind a mipmap level of the CUDA mipmapped array to a surface reference.
+ * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \p Width must be
+ * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set,
+ * then \p Depth must be a multiple of six.
+ * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather.
+ * Texture gather can only be performed on 2D CUDA mipmapped arrays.
+ *
+ * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table.
+ * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute
+ * is not specified. For ex., TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute
+ * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH.
+ *
+ * <table>
+ * <tr><td><b>CUDA array type</b></td>
+ * <td><b>Valid extents that must always be met<br>{(width range in elements),
+ * (height range), (depth range)}</b></td>
+ * <td><b>Valid extents with CUDA_ARRAY3D_SURFACE_LDST set<br>
+ * {(width range in elements), (height range), (depth range)}</b></td></tr>
+ * <tr><td>1D</td>
+ * <td>{ (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 }</td>
+ * <td>{ (1,SURFACE1D_WIDTH), 0, 0 }</td></tr>
+ * <tr><td>2D</td>
+ * <td>{ (1,TEXTURE2D_MIPMAPPED_WIDTH), (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 }</td>
+ * <td>{ (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }</td></tr>
+ * <tr><td>3D</td>
+ * <td>{ (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) }
+ * <br>OR<br>
+ * { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE),
+ * (1,TEXTURE3D_DEPTH_ALTERNATE) }</td>
+ * <td>{ (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
+ * (1,SURFACE3D_DEPTH) }</td></tr>
+ * <tr><td>1D Layered</td>
+ * <td>{ (1,TEXTURE1D_LAYERED_WIDTH), 0,
+ * (1,TEXTURE1D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE1D_LAYERED_WIDTH), 0,
+ * (1,SURFACE1D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>2D Layered</td>
+ * <td>{ (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT),
+ * (1,TEXTURE2D_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT),
+ * (1,SURFACE2D_LAYERED_LAYERS) }</td></tr>
+ * <tr><td>Cubemap</td>
+ * <td>{ (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 }</td>
+ * <td>{ (1,SURFACECUBEMAP_WIDTH),
+ * (1,SURFACECUBEMAP_WIDTH), 6 }</td></tr>
+ * <tr><td>Cubemap Layered</td>
+ * <td>{ (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH),
+ * (1,TEXTURECUBEMAP_LAYERED_LAYERS) }</td>
+ * <td>{ (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH),
+ * (1,SURFACECUBEMAP_LAYERED_LAYERS) }</td></tr>
+ * </table>
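+ *
+ * \par
+ * Illustrative sketch (not part of the original description) creating a 2D
+ * mipmapped array of floats and retrieving its base level; the 1024 x 1024
+ * extent and the level count of 11 (1 + floor(log2(1024))) are placeholder
+ * choices:
+ * \code
+     CUDA_ARRAY3D_DESCRIPTOR desc;
+     memset(&desc, 0, sizeof(desc));
+     desc.Format      = CU_AD_FORMAT_FLOAT;
+     desc.NumChannels = 1;
+     desc.Width       = 1024;
+     desc.Height      = 1024;
+     desc.Depth       = 0;           // 2D mipmapped array
+
+     CUmipmappedArray mipArr;
+     cuMipmappedArrayCreate(&mipArr, &desc, 11);
+
+     CUarray level0;
+     cuMipmappedArrayGetLevel(&level0, mipArr, 0);
+     // ... use level0 ...
+     cuMipmappedArrayDestroy(mipArr);
+ * \endcode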
+ * + * + * \param pHandle - Returned mipmapped array + * \param pMipmappedArrayDesc - mipmapped array descriptor + * \param numMipmapLevels - Number of mipmap levels + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cuMipmappedArrayDestroy, + * ::cuMipmappedArrayGetLevel, + * ::cuArrayCreate, + * ::cudaMallocMipmappedArray + */ +CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels); + +/** + * \brief Gets a mipmap level of a CUDA mipmapped array + * + * Returns in \p *pLevelArray a CUDA array that represents a single mipmap level + * of the CUDA mipmapped array \p hMipmappedArray. + * + * If \p level is greater than the maximum number of levels in this mipmapped array, + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * \param pLevelArray - Returned mipmap level CUDA array + * \param hMipmappedArray - CUDA mipmapped array + * \param level - Mipmap level + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::cuMipmappedArrayCreate, + * ::cuMipmappedArrayDestroy, + * ::cuArrayCreate, + * ::cudaGetMipmappedArrayLevel + */ +CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); + +/** + * \brief Destroys a CUDA mipmapped array + * + * Destroys the CUDA mipmapped array \p hMipmappedArray. + * + * \param hMipmappedArray - Mipmapped array to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ARRAY_IS_MAPPED, + * ::CUDA_ERROR_CONTEXT_IS_DESTROYED + * \notefnerr + * + * \sa + * ::cuMipmappedArrayCreate, + * ::cuMipmappedArrayGetLevel, + * ::cuArrayCreate, + * ::cudaFreeMipmappedArray + */ +CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray); + +/** +* \brief Retrieve handle for an address range +* +* Get a handle of the specified type to an address range. The address range +* must have been obtained by a prior call to either ::cuMemAlloc or ::cuMemAddressReserve. +* If the address range was obtained via ::cuMemAddressReserve, it must also be fully mapped via ::cuMemMap. +* +* Users must ensure the \p dptr and \p size are aligned to the host page size. +* +* When requesting CUmemRangeHandleType::CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, +* users are expected to query for dma_buf support for the platform +* by using ::CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED device attribute before calling +* this API. The \p handle will be interpreted as a pointer to an integer to store the dma_buf file descriptor. +* Users must ensure the entire address range is backed and mapped when +* the address range is allocated by ::cuMemAddressReserve. All the physical +* allocations backing the address range must be resident on the same device and +* have identical allocation properties. Users are also expected to retrieve a +* new handle every time the underlying physical allocation(s) corresponding +* to a previously queried VA range are changed. 
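+*
+* \par
+* Illustrative sketch (not part of the original description): \p dev, \p dptr
+* and \p size are placeholders for the current device, a device allocation and
+* its host-page-aligned size; for the dma_buf handle type the output parameter
+* is an integer file descriptor:
+* \code
+    int supported = 0;
+    cuDeviceGetAttribute(&supported, CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED, dev);
+    if (supported) {
+        int dmaBufFd = -1;
+        cuMemGetHandleForAddressRange(&dmaBufFd, dptr, size,
+                                      CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD, 0);
+        // dmaBufFd can now be handed to another API; close() it when done.
+    }
+* \endcode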
+* +* \param[out] handle - Pointer to the location where the returned handle will be stored. +* \param[in] dptr - Pointer to a valid CUDA device allocation. Must be aligned to host page size. +* \param[in] size - Length of the address range. Must be aligned to host page size. +* \param[in] handleType - Type of handle requested (defines type and size of the \p handle output parameter) +* \param[in] flags - Reserved, must be zero +* +* \return +* CUDA_SUCCESS +* CUDA_ERROR_INVALID_VALUE +* CUDA_ERROR_NOT_SUPPORTED +*/ +CUresult CUDAAPI cuMemGetHandleForAddressRange(void *handle, CUdeviceptr dptr, size_t size, CUmemRangeHandleType handleType, unsigned long long flags); + +/** @} */ /* END CUDA_MEM */ + +/** + * \defgroup CUDA_VA Virtual Memory Management + * + * ___MANBRIEF___ virtual memory management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the virtual memory management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** +* \brief Allocate an address range reservation. +* +* Reserves a virtual address range based on the given parameters, giving +* the starting address of the range in \p ptr. This API requires a system that +* supports UVA. The size and address parameters must be a multiple of the +* host page size and the alignment must be a power of two or zero for default +* alignment. +* +* \param[out] ptr - Resulting pointer to start of virtual address range allocated +* \param[in] size - Size of the reserved virtual address range requested +* \param[in] alignment - Alignment of the reserved virtual address range requested +* \param[in] addr - Fixed starting address range requested +* \param[in] flags - Currently unused, must be zero +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemAddressFree +*/ +CUresult CUDAAPI cuMemAddressReserve(CUdeviceptr *ptr, size_t size, size_t alignment, CUdeviceptr addr, unsigned long long flags); + +/** +* \brief Free an address range reservation. +* +* Frees a virtual address range reserved by cuMemAddressReserve. The size +* must match what was given to memAddressReserve and the ptr given must +* match what was returned from memAddressReserve. +* +* \param[in] ptr - Starting address of the virtual address range to free +* \param[in] size - Size of the virtual address region to free +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemAddressReserve +*/ +CUresult CUDAAPI cuMemAddressFree(CUdeviceptr ptr, size_t size); + +/** +* \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties +* +* This creates a memory allocation on the target device specified through the +* \p prop structure. The created allocation will not have any device or host +* mappings. The generic memory \p handle for the allocation can be +* mapped to the address space of calling process via ::cuMemMap. This handle +* cannot be transmitted directly to other processes (see +* ::cuMemExportToShareableHandle). 
On Windows, the caller must also pass +* an LPSECURITYATTRIBUTE in \p prop to be associated with this handle which +* limits or allows access to this handle for a recipient process (see +* ::CUmemAllocationProp::win32HandleMetaData for more). The \p size of this +* allocation must be a multiple of the the value given via +* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM +* flag. +* If ::CUmemAllocationProp::allocFlags::usage contains ::CU_MEM_CREATE_USAGE_TILE_POOL flag then +* the memory allocation is intended only to be used as backing tile pool for sparse CUDA arrays +* and sparse CUDA mipmapped arrays. +* (see ::cuMemMapArrayAsync). +* +* \param[out] handle - Value of handle returned. All operations on this allocation are to be performed using this handle. +* \param[in] size - Size of the allocation requested +* \param[in] prop - Properties of the allocation to create. +* \param[in] flags - flags for future use, must be zero now. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, const CUmemAllocationProp *prop, unsigned long long flags); + +/** +* \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate. +* +* Frees the memory that was allocated on a device through cuMemCreate. +* +* The memory allocation will be freed when all outstanding mappings to the memory +* are unmapped and when all outstanding references to the handle (including it's +* shareable counterparts) are also released. The generic memory handle can be +* freed when there are still outstanding mappings made with this handle. Each +* time a recipient process imports a shareable handle, it needs to pair it with +* ::cuMemRelease for the handle to be freed. If \p handle is not a valid handle +* the behavior is undefined. +* +* \param[in] handle Value of handle which was returned previously by cuMemCreate. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemCreate +*/ +CUresult CUDAAPI cuMemRelease(CUmemGenericAllocationHandle handle); + +/** +* \brief Maps an allocation handle to a reserved virtual address range. +* +* Maps bytes of memory represented by \p handle starting from byte \p offset to +* \p size to address range [\p addr, \p addr + \p size]. This range must be an +* address reservation previously reserved with ::cuMemAddressReserve, and +* \p offset + \p size must be less than the size of the memory allocation. +* Both \p ptr, \p size, and \p offset must be a multiple of the value given via +* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. +* If \p handle represents a multicast object, \p ptr, \p size and \p offset must +* be aligned to the value returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_MINIMUM_GRANULARITY. For best performance however, it is +* recommended that \p ptr, \p size and \p offset be aligned to the value +* returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_RECOMMENDED_GRANULARITY. 
+* +* Please note calling ::cuMemMap does not make the address accessible, +* the caller needs to update accessibility of a contiguous mapped VA +* range by calling ::cuMemSetAccess. +* +* Once a recipient process obtains a shareable memory handle +* from ::cuMemImportFromShareableHandle, the process must +* use ::cuMemMap to map the memory into its address ranges before +* setting accessibility with ::cuMemSetAccess. +* +* ::cuMemMap can only create mappings on VA range reservations +* that are not currently mapped. +* +* \param[in] ptr - Address where memory will be mapped. +* \param[in] size - Size of the memory mapping. +* \param[in] offset - Offset into the memory represented by +* - \p handle from which to start mapping +* - Note: currently must be zero. +* \param[in] handle - Handle to a shareable memory +* \param[in] flags - flags for future use, must be zero now. +* \return +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_OUT_OF_MEMORY, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* +* \sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemMap(CUdeviceptr ptr, size_t size, size_t offset, CUmemGenericAllocationHandle handle, unsigned long long flags); + +/** + * \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays + * + * Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. + * Each operation is specified by a ::CUarrayMapInfo entry in the \p mapInfoList array of size \p count. + * The structure ::CUarrayMapInfo is defined as follow: + \code + typedef struct CUarrayMapInfo_st { + CUresourcetype resourceType; + union { + CUmipmappedArray mipmap; + CUarray array; + } resource; + + CUarraySparseSubresourceType subresourceType; + union { + struct { + unsigned int level; + unsigned int layer; + unsigned int offsetX; + unsigned int offsetY; + unsigned int offsetZ; + unsigned int extentWidth; + unsigned int extentHeight; + unsigned int extentDepth; + } sparseLevel; + struct { + unsigned int layer; + unsigned long long offset; + unsigned long long size; + } miptail; + } subresource; + + CUmemOperationType memOperationType; + + CUmemHandleType memHandleType; + union { + CUmemGenericAllocationHandle memHandle; + } memHandle; + + unsigned long long offset; + unsigned int deviceBitMask; + unsigned int flags; + unsigned int reserved[2]; + } CUarrayMapInfo; + \endcode + * + * where ::CUarrayMapInfo::resourceType specifies the type of resource to be operated on. + * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_ARRAY then + * ::CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. + * The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using + * ::cuArrayCreate or ::cuArray3DCreate with the flag ::CUDA_ARRAY3D_SPARSE + * or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + * For CUDA arrays obtained using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. + * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY + * then ::CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. 
+ * The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been + * allocated using ::cuMipmappedArrayCreate with the flag ::CUDA_ARRAY3D_SPARSE + * or ::CUDA_ARRAY3D_DEFERRED_MAPPING. + * + * ::CUarrayMapInfo::subresourceType specifies the type of subresource within the resource. + * ::CUarraySparseSubresourceType_enum is defined as: + \code + typedef enum CUarraySparseSubresourceType_enum { + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, + CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 + } CUarraySparseSubresourceType; + \endcode + * + * where ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a + * sparse-miplevel which spans at least one tile in every dimension. The remaining miplevels which + * are too small to span at least one tile in any dimension constitute the mip tail region as indicated by + * ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type. + * + * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL + * then ::CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. + * The ::CUarrayMapInfo::subresource::sparseLevel::offsetX, ::CUarrayMapInfo::subresource::sparseLevel::offsetY + * and ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. + * The ::CUarrayMapInfo::subresource::sparseLevel::extentWidth, ::CUarrayMapInfo::subresource::sparseLevel::extentHeight + * and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. + * These offsets and extents must be aligned to the corresponding tile dimension. + * For CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index. Otherwise, + * must be zero. + * For layered CUDA arrays and layered CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index. Otherwise, + * must be zero. + * ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth + * must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. + * Tile extents can be obtained by calling ::cuArrayGetSparseProperties and ::cuMipmappedArrayGetSparseProperties + * + * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL + * then ::CUarrayMapInfo::subresource::miptail struct must contain valid mip tail offset in + * ::CUarrayMapInfo::subresource::miptail::offset and size in ::CUarrayMapInfo::subresource::miptail::size. + * Both, mip tail offset and mip tail size must be aligned to the tile size. + * For layered CUDA mipmapped arrays which don't have the flag ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags + * as returned by ::cuMipmappedArrayGetSparseProperties, ::CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index. + * Otherwise, must be zero. + * + * If ::CUarrayMapInfo::resource::array or ::CUarrayMapInfo::resource::mipmap was created with ::CUDA_ARRAY3D_DEFERRED_MAPPING + * flag set the ::CUarrayMapInfo::subresourceType and the contents of ::CUarrayMapInfo::subresource will be ignored. + * + * ::CUarrayMapInfo::memOperationType specifies the type of operation. 
::CUmemOperationType is defined as: + \code + typedef enum CUmemOperationType_enum { + CU_MEM_OPERATION_TYPE_MAP = 1, + CU_MEM_OPERATION_TYPE_UNMAP = 2 + } CUmemOperationType; + \endcode + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource + * will be mapped onto the tile pool memory specified by ::CUarrayMapInfo::memHandle at offset ::CUarrayMapInfo::offset. + * The tile pool allocation has to be created by specifying the ::CU_MEM_CREATE_USAGE_TILE_POOL flag when calling ::cuMemCreate. Also, + * ::CUarrayMapInfo::memHandleType must be set to ::CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC. + * + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation + * is performed. ::CUarrayMapInfo::memHandle must be NULL. + * + * ::CUarrayMapInfo::deviceBitMask specifies the list of devices that must map or unmap physical memory. + * Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. + * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match + * the device associated with the tile pool memory allocation as specified by ::CUarrayMapInfo::memHandle. + * + * ::CUarrayMapInfo::flags and ::CUarrayMapInfo::reserved[] are unused and must be set to zero. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * + * \param[in] mapInfoList - List of ::CUarrayMapInfo + * \param[in] count - Count of ::CUarrayMapInfo in \p mapInfoList + * \param[in] hStream - Stream identifier for the stream to use for map or unmap operations + * + * \sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties + */ +CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); + +/** +* \brief Unmap the backing memory of a given address range. +* +* The range must be the entire contiguous address range that was mapped to. In +* other words, ::cuMemUnmap cannot unmap a sub-range of an address range mapped +* by ::cuMemCreate / ::cuMemMap. Any backing memory allocations will be freed +* if there are no existing mappings and there are no unreleased memory handles. +* +* When ::cuMemUnmap returns successfully the address range is converted to an +* address reservation and can be used for a future calls to ::cuMemMap. Any new +* mapping to this virtual address will need to have access granted through +* ::cuMemSetAccess, as all mappings start with no accessibility setup. +* +* \param[in] ptr - Starting address for the virtual address range to unmap +* \param[in] size - Size of the virtual address range to unmap +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* \note_sync +* +* \sa ::cuMemCreate, ::cuMemAddressReserve +*/ +CUresult CUDAAPI cuMemUnmap(CUdeviceptr ptr, size_t size); + +/** +* \brief Set the access flags for each location specified in \p desc for the given virtual address range +* +* Given the virtual address range via \p ptr and \p size, and the locations +* in the array given by \p desc and \p count, set the access flags for the +* target locations. 
The range must be a fully mapped address range +* containing all allocations created by ::cuMemMap / ::cuMemCreate. +* When setting the access flags for a virtual address range mapping a multicast +* object, \p ptr and \p size must be aligned to the value returned by +* ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_MINIMUM_GRANULARITY. +* For best performance however, it is recommended that \p ptr and \p size be +* aligned to the value returned by ::cuMulticastGetGranularity with the flag +* ::CU_MULTICAST_RECOMMENDED_GRANULARITY. +* +* \param[in] ptr - Starting address for the virtual address range +* \param[in] size - Length of the virtual address range +* \param[in] desc - Array of ::CUmemAccessDesc that describe how to change the +* - mapping for each location specified +* \param[in] count - Number of ::CUmemAccessDesc in \p desc +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_SUPPORTED +* \notefnerr +* \note_sync +* +* \sa ::cuMemSetAccess, ::cuMemCreate, :cuMemMap +*/ +CUresult CUDAAPI cuMemSetAccess(CUdeviceptr ptr, size_t size, const CUmemAccessDesc *desc, size_t count); + +/** +* \brief Get the access \p flags set for the given \p location and \p ptr +* +* \param[out] flags - Flags set for this location +* \param[in] location - Location in which to check the flags for +* \param[in] ptr - Address in which to check the access flags for +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_INVALID_DEVICE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemSetAccess +*/ +CUresult CUDAAPI cuMemGetAccess(unsigned long long *flags, const CUmemLocation *location, CUdeviceptr ptr); + +/** +* \brief Exports an allocation to a requested shareable handle type +* +* Given a CUDA memory handle, create a shareable memory +* allocation handle that can be used to share the memory with other +* processes. The recipient process can convert the shareable handle back into a +* CUDA memory handle using ::cuMemImportFromShareableHandle and map +* it with ::cuMemMap. The implementation of what this handle is and how it +* can be transferred is defined by the requested handle type in \p handleType +* +* Once all shareable handles are closed and the allocation is released, the allocated +* memory referenced will be released back to the OS and uses of the CUDA handle afterward +* will lead to undefined behavior. +* +* This API can also be used in conjunction with other APIs (e.g. Vulkan, OpenGL) +* that support importing memory from the shareable type +* +* \param[out] shareableHandle - Pointer to the location in which to store the requested handle type +* \param[in] handle - CUDA handle for the memory allocation +* \param[in] handleType - Type of shareable handle requested (defines type and size of the \p shareableHandle output parameter) +* \param[in] flags - Reserved, must be zero +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemExportToShareableHandle(void *shareableHandle, CUmemGenericAllocationHandle handle, CUmemAllocationHandleType handleType, unsigned long long flags); + +/** +* \brief Imports an allocation from a requested shareable handle type. 
+* +* If the current process cannot support the memory described by this shareable +* handle, this API will return CUDA_ERROR_NOT_SUPPORTED. +* +* \note Importing shareable handles exported from some graphics APIs (Vulkan, OpenGL, etc.) +* created on devices under an SLI group may not be supported, and thus this API will +* return CUDA_ERROR_NOT_SUPPORTED. +* There is no guarantee that the contents of \p handle will be the same CUDA memory handle +* for the same given OS shareable handle, or the same underlying allocation. +* +* \param[out] handle - CUDA Memory handle for the memory allocation. +* \param[in] osHandle - Shareable Handle representing the memory allocation that is to be imported. +* \param[in] shHandleType - Handle type of the exported handle ::CUmemAllocationHandleType. +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease +*/ +CUresult CUDAAPI cuMemImportFromShareableHandle(CUmemGenericAllocationHandle *handle, void *osHandle, CUmemAllocationHandleType shHandleType); + +/** +* \brief Calculates either the minimal or recommended granularity +* +* Calculates either the minimal or recommended granularity +* for a given allocation specification and returns it in \p granularity. This +* granularity can be used as a multiple for alignment, size, or address mapping. +* +* \param[out] granularity Returned granularity. +* \param[in] prop Property for which to determine the granularity +* \param[in] option Determines which granularity to return +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemMap +*/ +CUresult CUDAAPI cuMemGetAllocationGranularity(size_t *granularity, const CUmemAllocationProp *prop, CUmemAllocationGranularity_flags option); + +/** +* \brief Retrieve the contents of the property structure defining properties for this handle +* +* \param[out] prop - Pointer to a properties structure which will hold the information about this handle +* \param[in] handle - Handle on which to perform the query +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemImportFromShareableHandle +*/ +CUresult CUDAAPI cuMemGetAllocationPropertiesFromHandle(CUmemAllocationProp *prop, CUmemGenericAllocationHandle handle); + +/** +* \brief Given an address \p addr, returns the allocation handle of the backing memory allocation. +* +* The handle is guaranteed to be the same handle value used to map the memory. If the address +* requested is not mapped, the function will fail. The returned handle must be released with +* a corresponding number of calls to ::cuMemRelease. +* +* \note The address \p addr can be any address in a range previously mapped +* by ::cuMemMap, and not necessarily the start address. +* +* \param[out] handle CUDA Memory handle for the backing memory allocation. +* \param[in] addr Memory address to query, which has been mapped previously.
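+*
+* As an illustrative sketch only (it assumes mappedPtr is a ::CUdeviceptr inside a range
+* previously mapped with ::cuMemMap), the retain is typically paired with a release:
+ \code
+    CUmemGenericAllocationHandle h;
+    if (cuMemRetainAllocationHandle(&h, (void *)(uintptr_t)mappedPtr) == CUDA_SUCCESS) {
+        CUmemAllocationProp prop;
+        cuMemGetAllocationPropertiesFromHandle(&prop, h);  // e.g. inspect prop.location.id
+        cuMemRelease(h);                                   // balance the retain
+    }
+ \endcode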
+* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap +*/ +CUresult CUDAAPI cuMemRetainAllocationHandle(CUmemGenericAllocationHandle *handle, void *addr); + +/** @} */ /* END CUDA_VA */ + +/** + * \defgroup CUDA_MALLOC_ASYNC Stream Ordered Memory Allocator + * + * ___MANBRIEF___ Functions for performing allocation and free operations in stream order. + * Functions for controlling the behavior of the underlying allocator. + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream ordered memory allocator exposed by the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_MALLOC_ASYNC_overview overview + * + * The asynchronous allocator allows the user to allocate and free in stream order. + * All asynchronous accesses of the allocation must happen between + * the stream executions of the allocation and the free. If the memory is accessed + * outside of the promised stream order, a use before allocation / use after free error + * will cause undefined behavior. + * + * The allocator is free to reallocate the memory as long as it can guarantee + * that compliant memory accesses will not overlap temporally. + * The allocator may refer to internal stream ordering as well as inter-stream dependencies + * (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. + * The allocator may also insert inter-stream dependencies to establish the temporal guarantee. + * + * \section CUDA_MALLOC_ASYNC_support Supported Platforms + * + * Whether or not a device supports the integrated stream ordered memory allocator + * may be queried by calling ::cuDeviceGetAttribute() with the device attribute + * ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED + */ + +/** + * \brief Frees memory with stream ordered semantics + * + * Inserts a free operation into \p hStream. + * The allocation must not be accessed after stream execution reaches the free. + * After this API returns, accessing the memory from any subsequent work launched on the GPU + * or querying its pointer attributes results in undefined behavior. + * + * \note During stream capture, this function results in the creation of a free node and + * must therefore be passed the address of a graph allocation. + * + * \param dptr - memory to free + * \param hStream - The stream establishing the stream ordering contract. + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED + */ +CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); + +/** + * \brief Allocates memory with stream ordered semantics + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. + * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the memory pool current to the stream's device. + * + * \note The default memory pool of a device contains device memory from that device. + * \note Basic stream ordering allows future work submitted into the same stream to use the allocation. 
+ * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * \note During stream capture, this function results in the creation of an allocation node. In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] dptr - Returned device pointer + * \param[in] bytesize - Number of bytes to allocate + * \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemAllocFromPoolAsync, ::cuMemFreeAsync, ::cuDeviceSetMemPool, + * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); + +/** + * \brief Tries to release memory back to the OS + * + * Releases memory back to the OS until the pool contains fewer than minBytesToKeep + * reserved bytes, or there is no more memory that the allocator can safely release. + * The allocator cannot release OS allocations that back outstanding asynchronous allocations. + * The OS allocations may happen at different granularity from the user allocations. + * + * \note: Allocations that have not been freed count as outstanding. + * \note: Allocations that have been asynchronously freed but whose completion has + * not been observed on the host (eg. by a synchronize) can count as outstanding. + * + * \param[in] pool - The memory pool to trim + * \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, + * the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have + * at least minBytesToKeep bytes reserved after the operation. + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolTrimTo(CUmemoryPool pool, size_t minBytesToKeep); + +/** + * \brief Sets attributes of a memory pool + * + * Supported attributes are: + * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. 
(default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cuMemFreeAsync (default enabled). + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of backing memory that was + * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. + * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + * Reset the high watermark that tracks the amount of used memory that was + * allocated for the memory pool. + * + * \param[in] pool - The memory pool to modify + * \param[in] attr - The attribute to modify + * \param[in] value - Pointer to the value to assign + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolSetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); + +/** + * \brief Gets attributes of a memory pool + * + * Supported attributes are: + * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) + * Amount of reserved memory in bytes to hold onto before trying + * to release memory back to the OS. When more than the release + * threshold bytes of memory are held by the memory pool, the + * allocator will try to release memory back to the OS on the + * next call to stream, event or context synchronize. (default 0) + * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to use memory asynchronously freed + * in another stream as long as a stream ordering dependency + * of the allocating stream on the free action exists. + * Cuda events and null stream interactions can create the required + * stream ordered dependencies. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) + * Allow reuse of already completed frees when there is no dependency + * between the free and allocation. (default enabled) + * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) + * Allow ::cuMemAllocAsync to insert new stream dependencies + * in order to establish the stream ordering required to reuse + * a piece of memory released by ::cuMemFreeAsync (default enabled). + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) + * Amount of backing memory currently allocated for the mempool + * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) + * High watermark of backing memory allocated for the mempool since the + * last time it was reset. + * - ::CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) + * Amount of memory from the pool that is currently in use by the application. + * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) + * High watermark of the amount of memory from the pool that was in use by the application. 
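+ *
+ * For illustration only (assuming \p pool is a valid ::CUmemoryPool), querying the usage
+ * counters and resetting a high watermark might look like:
+ \code
+    cuuint64_t usedNow = 0, usedPeak = 0;
+    cuMemPoolGetAttribute(pool, CU_MEMPOOL_ATTR_USED_MEM_CURRENT, &usedNow);
+    cuMemPoolGetAttribute(pool, CU_MEMPOOL_ATTR_USED_MEM_HIGH, &usedPeak);
+
+    cuuint64_t zero = 0;   // high watermarks may only be reset to zero
+    cuMemPoolSetAttribute(pool, CU_MEMPOOL_ATTR_USED_MEM_HIGH, &zero);
+ \endcode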
+ * + * \param[in] pool - The memory pool to get attributes of + * \param[in] attr - The attribute to get + * \param[out] value - Retrieved value + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolGetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); + +/** + * \brief Controls visibility of pools between devices + * + * \param[in] pool - The pool being modified + * \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu. + * \param[in] count - Number of descriptors in the map array. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolSetAccess(CUmemoryPool pool, const CUmemAccessDesc *map, size_t count); + +/** + * \brief Returns the accessibility of a pool from a device + * + * Returns the accessibility of the pool's memory from the specified location. + * + * \param[out] flags - the accessibility of the pool from the specified location + * \param[in] memPool - the pool being queried + * \param[in] location - the location accessing the pool + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolGetAccess(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation *location); + +/** + * \brief Creates a memory pool + * + * Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines + * the properties of the pool such as the backing device and IPC capabilities. + * + * By default, the pool's memory will be accessible from the device it is allocated on. + * + * \note Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool, + * ::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle + */ +CUresult CUDAAPI cuMemPoolCreate(CUmemoryPool *pool, const CUmemPoolProps *poolProps); + +/** + * \brief Destroys the specified memory pool + * + * If any pointers obtained from this pool haven't been freed or + * the pool has free operations that haven't completed + * when ::cuMemPoolDestroy is invoked, the function will return immediately and the + * resources associated with the pool will be released automatically + * once there are no more outstanding allocations. + * + * Destroying the current mempool of a device sets the default mempool of + * that device as the current mempool for that device. + * + * \note A device's default memory pool cannot be destroyed. + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, + * ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate + */ +CUresult CUDAAPI cuMemPoolDestroy(CUmemoryPool pool); + +/** + * \brief Allocates memory from a specified pool with stream ordered semantics. + * + * Inserts an allocation operation into \p hStream. + * A pointer to the allocated memory is returned immediately in *dptr. 
+ * The allocation must not be accessed until the the allocation operation completes. + * The allocation comes from the specified memory pool. + * + * \note + * - The specified memory pool may be from a device different than that of the specified \p hStream. + * + * - Basic stream ordering allows future work submitted into the same stream to use the allocation. + * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation + * operation completes before work submitted in a separate stream runs. + * + * \note During stream capture, this function results in the creation of an allocation node. In this case, + * the allocation is owned by the graph instead of the memory pool. The memory pool's properties + * are used to set the node's creation parameters. + * + * \param[out] dptr - Returned device pointer + * \param[in] bytesize - Number of bytes to allocate + * \param[in] pool - The pool to allocate from + * \param[in] hStream - The stream establishing the stream ordering semantic + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, + * ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess, + * ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); + +/** + * \brief Exports a memory pool to the requested handle type. + * + * Given an IPC capable mempool, create an OS handle to share the pool with another process. + * A recipient process can convert the shareable handle into a mempool with ::cuMemPoolImportFromShareableHandle. + * Individual pointers can then be shared with the ::cuMemPoolExportPointer and ::cuMemPoolImportPointer APIs. + * The implementation of what the shareable handle is and how it can be transferred is defined by the requested + * handle type. + * + * \note: To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE. + * + * \param[out] handle_out - Returned OS handle + * \param[in] pool - pool to export + * \param[in] handleType - the type of handle to create + * \param[in] flags - must be 0 + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer, + * ::cuMemPoolImportPointer, ::cuMemAllocAsync, ::cuMemFreeAsync, + * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, + * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute + */ +CUresult CUDAAPI cuMemPoolExportToShareableHandle(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags); + +/** + * \brief imports a memory pool from a shared handle. + * + * Specific allocations can be imported from the imported pool with cuMemPoolImportPointer. + * + * \note Imported memory pools do not support creating new allocations. + * As such imported memory pools may not be used in cuDeviceSetMemPool + * or ::cuMemAllocFromPoolAsync calls. 
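+ *
+ * The following is a rough sketch of the export/import flow on Linux (it assumes the pool was
+ * created with ::CUmemPoolProps::handleTypes set to ::CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR,
+ * that ptr was allocated from that pool with ::cuMemAllocFromPoolAsync, and that the file
+ * descriptor and export data are transferred to the importing process by some OS IPC mechanism;
+ * error checking omitted):
+ \code
+    // Exporting process
+    int fd = -1;
+    cuMemPoolExportToShareableHandle(&fd, pool, CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);
+    CUmemPoolPtrExportData exportData;
+    cuMemPoolExportPointer(&exportData, ptr);
+
+    // Importing process (after receiving 'fd' and 'exportData')
+    CUmemoryPool importedPool;
+    cuMemPoolImportFromShareableHandle(&importedPool, (void *)(uintptr_t)fd,
+                                       CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);
+    CUdeviceptr importedPtr;
+    cuMemPoolImportPointer(&importedPtr, importedPool, &exportData);
+ \endcode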
+ * + * \param[out] pool_out - Returned memory pool + * \param[in] handle - OS handle of the pool to open + * \param[in] handleType - The type of handle being imported + * \param[in] flags - must be 0 + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer + */ +CUresult CUDAAPI cuMemPoolImportFromShareableHandle( + CUmemoryPool *pool_out, + void *handle, + CUmemAllocationHandleType handleType, + unsigned long long flags); + +/** + * \brief Export data to share a memory pool allocation between processes. + * + * Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. + * The recipient process can import the allocation with the ::cuMemPoolImportPointer api. + * The data is not a handle and may be shared through any IPC mechanism. + * + * \param[out] shareData_out - Returned export data + * \param[in] ptr - pointer to memory being exported + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer + */ +CUresult CUDAAPI cuMemPoolExportPointer(CUmemPoolPtrExportData *shareData_out, CUdeviceptr ptr); + +/** + * \brief Import a memory pool allocation from another process. + * + * Returns in \p ptr_out a pointer to the imported memory. + * The imported memory must not be accessed before the allocation operation completes + * in the exporting process. The imported memory must be freed from all importing processes before + * being freed in the exporting process. The pointer may be freed with cuMemFree + * or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed + * on the importing process before the free operation on the exporting process. + * + * \note The cuMemFreeAsync api may be used in the exporting process before + * the cuMemFreeAsync operation completes in its stream as long as the + * cuMemFreeAsync in the exporting process specifies a stream with + * a stream dependency on the importing process's cuMemFreeAsync. + * + * \param[out] ptr_out - pointer to imported memory + * \param[in] pool - pool from which to import + * \param[in] shareData - data specifying the memory to import + * + * \returns + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_OUT_OF_MEMORY + * + * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer + */ +CUresult CUDAAPI cuMemPoolImportPointer(CUdeviceptr *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData *shareData); + +/** @} */ /* END CUDA_MALLOC_ASYNC */ + +/** + * \defgroup CUDA_MULTICAST Multicast Object Management + * + * ___MANBRIEF___ Functions for creating multicast objects, adding devices to them and binding/unbinding memory + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the CUDA multicast object operations exposed by the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_MULTICAST_overview overview + * + * A multicast object created via ::cuMulticastCreate enables certain memory + * operations to be broadcasted to a team of devices. Devices can be added to a + * multicast object via ::cuMulticastAddDevice. 
Memory can be bound on each + * participating device via either ::cuMulticastBindMem or ::cuMulticastBindAddr. + * Multicast objects can be mapped into a device's virtual address space using + * the virtual memmory management APIs (see ::cuMemMap and ::cuMemSetAccess). + * + * \section CUDA_MULTICAST_support Supported Platforms + * + * Support for multicast on a specific device can be queried using the device + * attribute ::CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED + */ + +/** + * \brief Create a generic allocation handle representing a multicast object described by the given properties. + * + * This creates a multicast object as described by \p prop. The number of + * participating devices is specified by ::CUmulticastObjectProp::numDevices. + * Devices can be added to the multicast object via ::cuMulticastAddDevice. + * All participating devices must be added to the multicast object before memory + * can be bound to it. Memory is bound to the multicast object via either + * ::cuMulticastBindMem or ::cuMulticastBindAddr, and can be unbound via + * ::cuMulticastUnbind. The total amount of memory that can be bound per device + * is specified by :CUmulticastObjectProp::size. This size must be a multiple of + * the value returned by ::cuMulticastGetGranularity with the flag + * ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, the size + * should be aligned to the value returned by ::cuMulticastGetGranularity with + * the flag ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + * + * After all participating devices have been added, multicast objects can also + * be mapped to a device's virtual address space using the virtual memory + * management APIs (see ::cuMemMap and ::cuMemSetAccess). Multicast objects can + * also be shared with other processes by requesting a shareable handle via + * ::cuMemExportToShareableHandle. Note that the desired types of shareable + * handles must be specified in the bitmask ::CUmulticastObjectProp::handleTypes. + * Multicast objects can be released using the virtual memory management API + * ::cuMemRelease. + * + * \param[out] mcHandle Value of handle returned. + * \param[in] prop Properties of the multicast object to create. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuMulticastAddDevice, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind + * \sa ::cuMemCreate, ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle + */ +CUresult CUDAAPI cuMulticastCreate(CUmemGenericAllocationHandle *mcHandle, const CUmulticastObjectProp *prop); + +/** + * \brief Associate a device to a multicast object. + * + * Associates a device to a multicast object. The added device will be a part of + * the multicast team of size specified by CUmulticastObjectProp::numDevices + * during ::cuMulticastCreate. + * The association of the device to the multicast object is permanent during + * the life time of the multicast object. + * All devices must be added to the multicast team before any memory can be + * bound to any device in the team. Any calls to ::cuMulticastBindMem or + * ::cuMulticastBindAddr will block until all devices have been added. + * Similarly all devices must be added to the multicast team before a virtual + * address range can be mapped to the multicast object. 
A call to ::cuMemMap + * will block until all devices have been added. + * + * \param[in] mcHandle Handle representing a multicast object. + * \param[in] dev Device that will be associated with the multicast + * object. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr + */ +CUresult CUDAAPI cuMulticastAddDevice(CUmemGenericAllocationHandle mcHandle, CUdevice dev); + +/** + * \brief Bind a memory allocation represented by a handle to a multicast object. + * + * Binds a memory allocation specified by \p memHandle and created via + * ::cuMemCreate to a multicast object represented by \p mcHandle and created + * via ::cuMulticastCreate. The intended \p size of the bind, the offset in the + * multicast range \p mcOffset as well as the offset in the memory \p memOffset + * must be a multiple of the value returned by ::cuMulticastGetGranularity with + * the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. For best performance however, + * \p size, \p mcOffset and \p memOffset should be aligned to the granularity of + * the memory allocation (see ::cuMemGetAllocationGranularity) or to the value + * returned by ::cuMulticastGetGranularity with the flag + * ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + * + * The \p size + \p memOffset must be smaller than the size of the allocated + * memory. Similarly the \p size + \p mcOffset must be smaller than the size + * of the multicast object. + * The memory allocation must have been created on one of the devices + * that was added to the multicast team via ::cuMulticastAddDevice. + * Externally shareable as well as imported multicast objects can be bound only + * to externally shareable memory. + * Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are + * insufficient resources required to perform the bind. This call may also + * return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not + * initialized or running. + * + * \param[in] mcHandle Handle representing a multicast object. + * \param[in] mcOffset Offset into the multicast object for attachment. + * \param[in] memHandle Handle representing a memory allocation. + * \param[in] memOffset Offset into the memory for attachment. + * \param[in] size Size of the memory that will be bound to the + * multicast object. + * \param[in] flags Flags for future use, must be zero for now. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_SYSTEM_NOT_READY + * + * \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate + */ +CUresult CUDAAPI cuMulticastBindMem(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUmemGenericAllocationHandle memHandle, size_t memOffset, size_t size, unsigned long long flags); + +/** + * \brief Bind a memory allocation represented by a virtual address to a multicast object. + * + * Binds a memory allocation specified by its mapped address \p memptr to a + * multicast object represented by \p mcHandle. + * The memory must have been allocated via ::cuMemCreate or ::cudaMallocAsync. + * The intended \p size of the bind, the offset in the multicast range + * \p mcOffset and \p memptr must be a multiple of the value returned by + * ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. + * For best performance however, \p size, \p mcOffset and \p memptr should be + * aligned to the value returned by ::cuMulticastGetGranularity with the flag + * ::CU_MULTICAST_GRANULARITY_RECOMMENDED. + * + * The \p size must be smaller than the size of the allocated memory. + * Similarly the \p size + \p mcOffset must be smaller than the total size + * of the multicast object. + * The memory allocation must have been created on one of the devices + * that was added to the multicast team via ::cuMulticastAddDevice. + * Externally shareable as well as imported multicast objects can be bound only + * to externally shareable memory. + * Note that this call will return CUDA_ERROR_OUT_OF_MEMORY if there are + * insufficient resources required to perform the bind. This call may also + * return CUDA_ERROR_SYSTEM_NOT_READY if the necessary system software is not + * initialized or running. + * + * \param[in] mcHandle Handle representing a multicast object. + * \param[in] mcOffset Offset into the multicast VA range for attachment. + * \param[in] memptr Virtual address of the memory allocation. + * \param[in] size Size of the memory that will be bound to the + * multicast object. + * \param[in] flags Flags for future use, must be zero now. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_OUT_OF_MEMORY, + * ::CUDA_ERROR_SYSTEM_NOT_READY + * + * \sa ::cuMulticastCreate, ::cuMulticastAddDevice, ::cuMemCreate + */ +CUresult CUDAAPI cuMulticastBindAddr(CUmemGenericAllocationHandle mcHandle, size_t mcOffset, CUdeviceptr memptr, size_t size, unsigned long long flags); + +/** + * \brief Unbind any memory allocations bound to a multicast object at a given offset and up to a given size. + * + * Unbinds any memory allocations hosted on \p dev and bound to a multicast + * object at \p mcOffset and up to a given \p size. + * The intended \p size of the unbind and the offset in the multicast range + * (\p mcOffset) must be a multiple of the value returned by + * ::cuMulticastGetGranularity with the flag ::CU_MULTICAST_GRANULARITY_MINIMUM. + * The \p size + \p mcOffset must be smaller than the total size of the + * multicast object. + * + * \note + * Warning: + * The \p mcOffset and the \p size must match the corresponding values specified + * during the bind call. Any other values may result in undefined behavior. + * + * \param[in] mcHandle Handle representing a multicast object. + * \param[in] dev Device that hosts the memory allocation. + * \param[in] mcOffset Offset into the multicast object. + * \param[in] size Desired size to unbind.
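+ *
+ * Purely as an illustration (assuming memHandle was bound earlier with ::cuMulticastBindMem
+ * using the same \p size and a zero offset, and that \p dev hosts that allocation), an unbind
+ * mirrors its bind:
+ \code
+    // Earlier: bind 'size' bytes of 'memHandle' at offset 0 of the multicast object.
+    cuMulticastBindMem(mcHandle, 0 /* mcOffset */, memHandle, 0 /* memOffset */, size, 0);
+    // ... use the multicast mapping ...
+    // Unbind with the same offset and size on the device that hosts the allocation.
+    cuMulticastUnbind(mcHandle, dev, 0 /* mcOffset */, size);
+ \endcode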
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_PERMITTED, + * ::CUDA_ERROR_NOT_SUPPORTED + * + * \sa ::cuMulticastBindMem, ::cuMulticastBindAddr + */ +CUresult CUDAAPI cuMulticastUnbind(CUmemGenericAllocationHandle mcHandle, CUdevice dev, size_t mcOffset, size_t size); + +/** +* \brief Calculates either the minimal or recommended granularity for multicast object +* +* Calculates either the minimal or recommended granularity for a given set of +* multicast object properties and returns it in granularity. This granularity +* can be used as a multiple for size, bind offsets and address mappings of the +* multicast object. +* +* \param[out] granularity Returned granularity. +* \param[in] prop Properties of the multicast object. +* \param[in] option Determines which granularity to return. +* +* \returns +* ::CUDA_SUCCESS, +* ::CUDA_ERROR_INVALID_VALUE, +* ::CUDA_ERROR_NOT_INITIALIZED, +* ::CUDA_ERROR_DEINITIALIZED, +* ::CUDA_ERROR_NOT_PERMITTED, +* ::CUDA_ERROR_NOT_SUPPORTED +* +* \sa ::cuMulticastCreate, ::cuMulticastBindMem, ::cuMulticastBindAddr, ::cuMulticastUnbind +*/ +CUresult CUDAAPI cuMulticastGetGranularity(size_t *granularity, const CUmulticastObjectProp *prop, CUmulticastGranularity_flags option); + +/** @} */ /* END CUDA_MULTICAST */ + +/** + * \defgroup CUDA_UNIFIED Unified Addressing + * + * ___MANBRIEF___ unified addressing functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the unified addressing functions of the + * low-level CUDA driver application programming interface. + * + * @{ + * + * \section CUDA_UNIFIED_overview Overview + * + * CUDA devices can share a unified address space with the host. + * For these devices there is no distinction between a device + * pointer and a host pointer -- the same pointer value may be + * used to access memory from the host program and from a kernel + * running on the device (with exceptions enumerated below). + * + * \section CUDA_UNIFIED_support Supported Platforms + * + * Whether or not a device supports unified addressing may be + * queried by calling ::cuDeviceGetAttribute() with the device + * attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. + * + * Unified addressing is automatically enabled in 64-bit processes + * + * \section CUDA_UNIFIED_lookup Looking Up Information from Pointer Values + * + * It is possible to look up information about the memory which backs a + * pointer value. For instance, one may want to know if a pointer points + * to host or device memory. As another example, in the case of device + * memory, one may want to know on which CUDA device the memory + * resides. These properties may be queried using the function + * ::cuPointerGetAttribute() + * + * Since pointers are unique, it is not necessary to specify information + * about the pointers specified to the various copy functions in the + * CUDA API. The function ::cuMemcpy() may be used to perform a copy + * between two pointers, ignoring whether they point to host or device + * memory (making ::cuMemcpyHtoD(), ::cuMemcpyDtoD(), and ::cuMemcpyDtoH() + * unnecessary for devices supporting unified addressing). For + * multidimensional copies, the memory type ::CU_MEMORYTYPE_UNIFIED may be + * used to specify that the CUDA driver should infer the location of the + * pointer from its value. 
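+ *
+ * As a brief, non-normative sketch (assuming ptr, dstPtr and numBytes come from elsewhere in
+ * the application and that the device supports unified addressing), such a lookup and copy
+ * might be written as:
+ \code
+    unsigned int memType = 0;
+    if (cuPointerGetAttribute(&memType, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, ptr) == CUDA_SUCCESS &&
+        memType == CU_MEMORYTYPE_DEVICE) {
+        // ptr refers to device memory; cuMemcpy() infers the direction from the pointer values.
+        cuMemcpy(dstPtr, ptr, numBytes);
+    }
+ \endcode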
+ * + * \section CUDA_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory + * + * All host memory allocated in all contexts using ::cuMemAllocHost() and + * ::cuMemHostAlloc() is always directly accessible from all contexts on + * all devices that support unified addressing. This is the case regardless + * of whether or not the flags ::CU_MEMHOSTALLOC_PORTABLE and + * ::CU_MEMHOSTALLOC_DEVICEMAP are specified. + * + * The pointer value through which allocated host memory may be accessed + * in kernels on all devices that support unified addressing is the same + * as the pointer value through which that memory is accessed on the host, + * so it is not necessary to call ::cuMemHostGetDevicePointer() to get the device + * pointer for these allocations. + * + * Note that this is not the case for memory allocated using the flag + * ::CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. + * + * \section CUDA_UNIFIED_autopeerregister Automatic Registration of Peer Memory + * + * Upon enabling direct access from a context that supports unified addressing + * to another peer context that supports unified addressing using + * ::cuCtxEnablePeerAccess() all memory allocated in the peer context using + * ::cuMemAlloc() and ::cuMemAllocPitch() will immediately be accessible + * by the current context. The device pointer value through + * which any peer memory may be accessed in the current context + * is the same pointer value through which that memory may be + * accessed in the peer context. + * + * \section CUDA_UNIFIED_exceptions Exceptions, Disjoint Addressing + * + * Not all memory may be accessed on devices through the same pointer + * value through which they are accessed on the host. These exceptions + * are host memory registered using ::cuMemHostRegister() and host memory + * allocated using the flag ::CU_MEMHOSTALLOC_WRITECOMBINED. For these + * exceptions, there exists a distinct host and device address for the + * memory. The device address is guaranteed to not overlap any valid host + * pointer range and is guaranteed to have the same value across all + * contexts that support unified addressing. + * + * This device address may be queried using ::cuMemHostGetDevicePointer() + * when a context using unified addressing is current. Either the host + * or the unified device pointer value may be used to refer to this memory + * through ::cuMemcpy() and similar functions using the + * ::CU_MEMORYTYPE_UNIFIED memory type. + * + */ + +/** + * \brief Returns information about a pointer + * + * The supported attributes are: + * + * - ::CU_POINTER_ATTRIBUTE_CONTEXT: + * + * Returns in \p *data the ::CUcontext in which \p ptr was allocated or + * registered. + * The type of \p data must be ::CUcontext *. + * + * If \p ptr was not allocated by, mapped by, or registered with + * a ::CUcontext which uses unified virtual addressing then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE: + * + * Returns in \p *data the physical memory type of the memory that + * \p ptr addresses as a ::CUmemorytype enumerated value. + * The type of \p data must be unsigned int. + * + * If \p ptr addresses device memory then \p *data is set to + * ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the + * memory resides is the ::CUdevice of the ::CUcontext returned by the + * ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \p ptr. + * + * If \p ptr addresses host memory then \p *data is set to + * ::CU_MEMORYTYPE_HOST. 
+ * + * If \p ptr was not allocated by, mapped by, or registered with + * a ::CUcontext which uses unified virtual addressing then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * If the current ::CUcontext does not support unified virtual + * addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER: + * + * Returns in \p *data the device pointer value through which + * \p ptr may be accessed by kernels running in the current + * ::CUcontext. + * The type of \p data must be CUdeviceptr *. + * + * If there exists no device pointer value through which + * kernels running in the current ::CUcontext may access + * \p ptr then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * If there is no current ::CUcontext then + * ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * Except in the exceptional disjoint addressing cases discussed + * below, the value returned in \p *data will equal the input + * value \p ptr. + * + * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER: + * + * Returns in \p *data the host pointer value through which + * \p ptr may be accessed by the host program. + * The type of \p data must be void **. + * If there exists no host pointer value through which + * the host program may directly access \p ptr then + * ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Except in the exceptional disjoint addressing cases discussed + * below, the value returned in \p *data will equal the input + * value \p ptr. + * + * - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS: + * + * Returns in \p *data two tokens for use with the nv-p2p.h Linux + * kernel interface. \p data must be a struct of type + * CUDA_POINTER_ATTRIBUTE_P2P_TOKENS. + * + * \p ptr must be a pointer to memory obtained from ::cuMemAlloc(). + * Note that p2pToken and vaSpaceToken are only valid for the + * lifetime of the source allocation. A subsequent allocation at + * the same address may return completely different tokens. + * Querying this attribute has a side effect of setting the attribute + * ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that + * \p ptr points to. + * + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + * + * A boolean attribute which, when set, ensures that synchronous memory operations + * initiated on the region of memory that \p ptr points to will always synchronize. + * See further documentation in the section titled "API synchronization behavior" + * to learn more about cases when synchronous memory operations can + * exhibit asynchronous behavior. + * + * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID: + * + * Returns in \p *data a buffer ID which is guaranteed to be unique within the process. + * \p data must point to an unsigned long long. + * + * \p ptr must be a pointer to memory obtained from a CUDA memory allocation API. + * Every memory allocation from any of the CUDA memory allocation APIs will + * have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs + * from previously freed allocations. IDs are only unique within a single process. + * + * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED: + * + * Returns in \p *data a boolean that indicates whether the pointer points to + * managed memory or not. + * + * If \p ptr is not a valid CUDA pointer then ::CUDA_ERROR_INVALID_VALUE is returned. + * + * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: + * + * Returns in \p *data an integer representing a device ordinal of a device against + * which the memory was allocated or registered.
+ * + * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: + * + * Returns in \p *data a boolean that indicates if this pointer maps to + * an allocation that is suitable for ::cudaIpcGetMemHandle. + * + * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: + * + * Returns in \p *data the starting address for the allocation referenced + * by the device pointer \p ptr. Note that this is not necessarily the + * address of the mapped region, but the address of the mappable address + * range \p ptr references (e.g. from ::cuMemAddressReserve). + * + * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE: + * + * Returns in \p *data the size for the allocation referenced by the device + * pointer \p ptr. Note that this is not necessarily the size of the mapped + * region, but the size of the mappable address range \p ptr references + * (e.g. from ::cuMemAddressReserve). To retrieve the size of the mapped + * region, see ::cuMemGetAddressRange + * + * - ::CU_POINTER_ATTRIBUTE_MAPPED: + * + * Returns in \p *data a boolean that indicates if this pointer is in a + * valid address range that is mapped to a backing allocation. + * + * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: + * + * Returns a bitmask of the allowed handle types for an allocation that may + * be passed to ::cuMemExportToShareableHandle. + * + * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: + * + * Returns in \p *data the handle to the mempool that the allocation was obtained from. + * + * \par + * + * Note that for most allocations in the unified virtual address space + * the host and device pointer for accessing the allocation will be the + * same. The exceptions to this are + * - user memory registered using ::cuMemHostRegister + * - host memory allocated using ::cuMemHostAlloc with the + * ::CU_MEMHOSTALLOC_WRITECOMBINED flag + * For these types of allocation there will exist separate, disjoint host + * and device addresses for accessing the allocation. In particular + * - The host address will correspond to an invalid unmapped device address + * (which will result in an exception if accessed from the device) + * - The device address will correspond to an invalid unmapped host address + * (which will result in an exception if accessed from the host). + * For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER + * and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host + * and device addresses from either address. + * + * \param data - Returned pointer attribute value + * \param attribute - Pointer attribute to query + * \param ptr - Pointer + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuPointerSetAttribute, + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuMemAllocHost, + * ::cuMemFreeHost, + * ::cuMemHostAlloc, + * ::cuMemHostRegister, + * ::cuMemHostUnregister, + * ::cudaPointerGetAttributes + */ +CUresult CUDAAPI cuPointerGetAttribute(void *data, CUpointer_attribute attribute, CUdeviceptr ptr); + +/** + * \brief Prefetches memory to the specified destination device + * + * Prefetches memory to the specified destination device. \p devPtr is the + * base device pointer of the memory to be prefetched and \p dstDevice is the + * destination device. \p count specifies the number of bytes to copy. \p hStream + * is the stream in which the operation is enqueued. 
The memory range must refer + * to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables. + * + * Passing in CU_DEVICE_CPU for \p dstDevice will prefetch the data to host memory. If + * \p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS + * must be non-zero. Additionally, \p hStream must be associated with a device that has a + * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * + * The start address and end address of the memory range will be rounded down and rounded up + * respectively to be aligned to CPU page size before the prefetch operation is enqueued + * in the stream. + * + * If no physical memory has been allocated for this region, then this memory region + * will be populated and mapped on the destination device. If there's insufficient + * memory to prefetch the desired region, the Unified Memory driver may evict pages from other + * ::cuMemAllocManaged allocations to host memory in order to make room. Device memory + * allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted. + * + * By default, any mappings to the previous location of the migrated pages are removed and + * mappings for the new location are only setup on \p dstDevice. The exact behavior however + * also depends on the settings applied to this memory range via ::cuMemAdvise as described + * below: + * + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, + * then that subset will create a read-only copy of the pages on \p dstDevice. + * + * If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory + * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the + * preferred location of any pages in the memory range. + * + * If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, + * then mappings to those pages from all the appropriate processors are updated to + * refer to the new location if establishing such a mapping is possible. Otherwise, + * those mappings are cleared. + * + * Note that this API is not required for functionality and only serves to improve performance + * by allowing the application to migrate data to a suitable location before it is accessed. + * Memory accesses to this range are always coherent and are allowed even when the data is + * actively being migrated. + * + * Note that this function is asynchronous with respect to the host and all work + * on other devices. + * + * \param devPtr - Pointer to be prefetched + * \param count - Size in bytes + * \param dstDevice - Destination device to prefetch to + * \param hStream - Stream to enqueue prefetch operation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + * ::cuMemcpy3DPeerAsync, ::cuMemAdvise, + * ::cudaMemPrefetchAsync + */ +CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); + +/** + * \brief Advise about the usage of a given memory range + * + * Advise the Unified Memory subsystem about the usage pattern for the memory range + * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory + * range will be rounded down and rounded up respectively to be aligned to CPU page size before the + * advice is applied. 
The memory range must refer to managed memory allocated via ::cuMemAllocManaged + * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable + * memory provided it represents a valid, host-accessible region of memory and all additional constraints + * imposed by \p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable + * memory range results in an error being returned. + * + * The \p advice parameter can take the following values: + * - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read + * from and only occasionally written to. Any read accesses from any processor to this region will create a + * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync + * is called on this region, it will create a read-only copy of the data on the destination processor. + * If any processor writes to this region, all copies of the corresponding page will be invalidated + * except for the one where the write occurred. The \p device argument is ignored for this advice. + * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU + * that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * Also, if a context is created on a device that does not have the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until + * all such contexts are destroyed. + * If the memory region refers to valid system-allocated pageable memory, then the accessing device must + * have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only + * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice + * will not create a read-only copy when that device accesses this memory region. + * + * - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the + * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated + * copies of the data will be collapsed into a single copy. The location for the collapsed + * copy will be the preferred location if the page has a preferred location and one of the read-duplicated + * copies was resident at that location. Otherwise, the location chosen is arbitrary. + * + * - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the + * data to be the memory belonging to \p device. Passing in CU_DEVICE_CPU for \p device sets the + * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location + * does not cause data to migrate to that location immediately. Instead, it guides the migration policy + * when a fault occurs on that memory region. If the data is already in its preferred location and the + * faulting processor can establish a mapping without requiring the data to be migrated, then + * data migration will be avoided. On the other hand, if the data is not in its preferred location + * or if a direct mapping cannot be established, then it will be migrated to the processor accessing + * it. 
It is important to note that setting the preferred location does not prevent data prefetching + * done using ::cuMemPrefetchAsync. + * Having a preferred location can override the page thrash detection and resolution logic in the Unified + * Memory driver. Normally, if a page is detected to be constantly thrashing between for example host and device + * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But + * if the preferred location is set as device memory, then the page will continue to thrash indefinitely. + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + * policies associated with that advice will override the policies of this advice, unless read accesses from + * \p device will not result in a read-only copy being created on that device as outlined in description for + * the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY. + * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. Note however that this behavior may change in the future. + * + * - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION + * and changes the preferred location to none. + * + * - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \p device. + * Passing in ::CU_DEVICE_CPU for \p device will set the advice for the CPU. If \p device is a GPU, then + * the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. + * This advice does not cause data migration and has no impact on the location of the data per se. Instead, + * it causes the data to always be mapped in the specified processor's page tables, as long as the + * location of the data permits a mapping to be established. If the data gets migrated for any reason, + * the mappings are updated accordingly. + * This advice is recommended in scenarios where data locality is not important, but avoiding faults is. + * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the + * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data + * over to the other GPUs is not as important because the accesses are infrequent and the overhead of + * migration may be too high. But preventing faults can still help improve performance, and so having + * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated + * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the + * ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the + * page in host memory. + * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the + * policies associated with that advice will override the policies of this advice. Additionally, if the + * preferred location of this memory region or any subset of it is also \p device, then the policies + * associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. 
+ * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. + * + * - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to + * the data from \p device may be removed at any time causing accesses to result in non-fatal page faults. + * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero + * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has + * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, + * then this call has no effect. + * + * \param devPtr - Pointer to memory to set the advice for + * \param count - Size in bytes of the memory range + * \param advice - Advice to be applied for the specified memory range + * \param device - Device to apply the advice for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * \note_async + * \note_null_stream + * + * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, + * ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, + * ::cudaMemAdvise + */ +CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device); + +/** + * \brief Query an attribute of a given memory range + * + * Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The + * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via + * __managed__ variables. + * + * The \p attribute parameter can take the following values: + * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \p data will be interpreted + * as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given + * memory range have read-duplication enabled, or 0 otherwise. + * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \p data will be + * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device + * id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU + * if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID + * if either all the pages don't have the same preferred location or some of the pages don't have a + * preferred location at all. Note that the actual location of the pages in the memory range at the time of + * the query may be different from the preferred location. + * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \p data will be interpreted + * as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned + * will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range. + * If any device does not have that advice set for the entire memory range, that device will not be included. + * If \p data is larger than the number of devices that have that advice set for that memory range, + * CU_DEVICE_INVALID will be returned in all the extra space provided. 
For example, if \p dataSize is 12
+ * (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be
+ * { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If \p data is smaller than the number of devices that have
+ * that advice set, then only as many devices will be returned as can fit in the array. There is no
+ * guarantee on which specific devices will be returned, however.
+ * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \p data will be
+ * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location
+ * to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be
+ * a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU
+ * respectively. If any page in the memory range was never explicitly prefetched or if all pages were not
+ * prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the
+ * last location that the application requested to prefetch the memory range to. It gives no indication as to
+ * whether the prefetch operation to that location has completed or even begun.
+ *
+ * \param data - A pointer to a memory location where the result
+ * of the attribute query will be written to.
+ * \param dataSize - Size in bytes of the memory pointed to by \p data
+ * \param attribute - The attribute to query
+ * \param devPtr - Start of the range to query
+ * \param count - Size of the range to query
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_DEVICE
+ * \notefnerr
+ * \note_async
+ * \note_null_stream
+ *
+ * \sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync,
+ * ::cuMemAdvise,
+ * ::cudaMemRangeGetAttribute
+ */
+CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count);
+
+/**
+ * \brief Query attributes of a given memory range.
+ *
+ * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The
+ * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via
+ * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes
+ * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries.
+ * The results of the query will be stored in \p data.
+ *
+ * The list of supported attributes is given below. Please refer to ::cuMemRangeGetAttribute for
+ * attribute descriptions and restrictions.
+ *
+ * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY
+ * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION
+ * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY
+ * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION
+ *
+ * \param data - A two-dimensional array containing pointers to memory
+ * locations where the result of each attribute query will be written to.
+ * \param dataSizes - Array containing the sizes of each result + * \param attributes - An array of attributes to query + * (numAttributes and the number of attributes in this array should match) + * \param numAttributes - Number of attributes to query + * \param devPtr - Start of the range to query + * \param count - Size of the range to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa ::cuMemRangeGetAttribute, ::cuMemAdvise, + * ::cuMemPrefetchAsync, + * ::cudaMemRangeGetAttributes + */ +CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count); + +/** + * \brief Set attributes on a previously allocated memory region + * + * The supported attributes are: + * + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: + * + * A boolean attribute that can either be set (1) or unset (0). When set, + * the region of memory that \p ptr points to is guaranteed to always synchronize + * memory operations that are synchronous. If there are some previously initiated + * synchronous memory operations that are pending when this attribute is set, the + * function does not return until those memory operations are complete. + * See further documentation in the section titled "API synchronization behavior" + * to learn more about cases when synchronous memory operations can + * exhibit asynchronous behavior. + * \p value will be considered as a pointer to an unsigned integer to which this attribute is to be set. + * + * \param value - Pointer to memory containing the value to be set + * \param attribute - Pointer attribute to set + * \param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa ::cuPointerGetAttribute, + * ::cuPointerGetAttributes, + * ::cuMemAlloc, + * ::cuMemFree, + * ::cuMemAllocHost, + * ::cuMemFreeHost, + * ::cuMemHostAlloc, + * ::cuMemHostRegister, + * ::cuMemHostUnregister + */ +CUresult CUDAAPI cuPointerSetAttribute(const void *value, CUpointer_attribute attribute, CUdeviceptr ptr); + +/** + * \brief Returns information about a pointer. + * + * The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions): + * + * - ::CU_POINTER_ATTRIBUTE_CONTEXT + * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE + * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER + * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER + * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS + * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID + * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED + * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL + * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR + * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE + * - ::CU_POINTER_ATTRIBUTE_MAPPED + * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE + * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES + * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE + * + * \param numAttributes - Number of attributes to query + * \param attributes - An array of attributes to query + * (numAttributes and the number of attributes in this array should match) + * \param data - A two-dimensional array containing pointers to memory + * locations where the result of each attribute query will be written to. 
+ * \param ptr - Pointer to query + * + * Unlike ::cuPointerGetAttribute, this function will not return an error when the \p ptr + * encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values + * and CUDA_SUCCESS is returned. + * + * If \p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA + * (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuPointerGetAttribute, + * ::cuPointerSetAttribute, + * ::cudaPointerGetAttributes + */ +CUresult CUDAAPI cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr ptr); + +/** @} */ /* END CUDA_UNIFIED */ + +/** + * \defgroup CUDA_STREAM Stream Management + * + * ___MANBRIEF___ stream management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Create a stream + * + * Creates a stream and returns a handle in \p phStream. The \p Flags argument + * determines behaviors of the stream. + * + * Valid values for \p Flags are: + * - ::CU_STREAM_DEFAULT: Default stream creation flag. + * - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created + * stream may run concurrently with work in stream 0 (the NULL stream), and that + * the created stream should perform no implicit synchronization with stream 0. + * + * \param phStream - Returned newly created stream + * \param Flags - Parameters for stream creation + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags + */ +CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags); + +/** + * \brief Create a stream with the given priority + * + * Creates a stream with the specified priority and returns a handle in \p phStream. + * This API alters the scheduler priority of work in the stream. Work in a higher + * priority stream may preempt work already executing in a low priority stream. + * + * \p priority follows a convention where lower numbers represent higher priorities. + * '0' represents default priority. The range of meaningful numerical priorities can + * be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is + * outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + * it will automatically be clamped to the lowest or the highest number in the range. + * + * \param phStream - Returned newly created stream + * \param flags - Flags for stream creation. See ::cuStreamCreate for a list of + * valid flags + * \param priority - Stream priority. Lower numbers represent higher priorities. + * See ::cuCtxGetStreamPriorityRange for more information about + * meaningful stream priorities that can be passed. 
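+ *
+ * For illustration, a minimal sketch of creating a stream at the highest available
+ * priority (assumes a context is current; error checking omitted): \code
+   int leastPriority = 0, greatestPriority = 0;
+   CUstream highPriorityStream;
+   cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
+   // Lower numbers mean higher priority, so greatestPriority is the numerically lowest value.
+   cuStreamCreateWithPriority(&highPriorityStream, CU_STREAM_NON_BLOCKING, greatestPriority);
+ * \endcode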
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \note Stream priorities are supported only on GPUs + * with compute capability 3.5 or higher. + * + * \note In the current implementation, only compute kernels launched in + * priority streams are affected by the stream's priority. Stream priorities have + * no effect on host-to-device and device-to-host memory operations. + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cuCtxGetStreamPriorityRange, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreateWithPriority + */ +CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority); + + +/** + * \brief Query the priority of a given stream + * + * Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + * and return the priority in \p priority. Note that if the stream was created with a + * priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange, + * this function returns the clamped priority. + * See ::cuStreamCreateWithPriority for details about priority clamping. + * + * \param hStream - Handle to the stream to be queried + * \param priority - Pointer to a signed integer in which the stream's priority is returned + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamCreateWithPriority, + * ::cuCtxGetStreamPriorityRange, + * ::cuStreamGetFlags, + * ::cudaStreamGetPriority + */ +CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); + +/** + * \brief Query the flags of a given stream + * + * Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority + * and return the flags in \p flags. + * + * \param hStream - Handle to the stream to be queried + * \param flags - Pointer to an unsigned integer in which the stream's flags are returned + * The value returned in \p flags is a logical 'OR' of all flags that + * were used while creating this stream. See ::cuStreamCreate for the list + * of valid flags + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cudaStreamGetFlags + */ +CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); + +/** + * \brief Returns the unique Id associated with the stream handle supplied + * + * Returns in \p streamId the unique Id which is associated with the given stream handle. + * The Id is unique for the life of the program. + * + * The stream handle \p hStream can refer to any of the following: + *
+ * - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate
+ *   and ::cuStreamCreateWithPriority, or their runtime API equivalents such as
+ *   ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority.
+ *   Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and
+ *   ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted,
+ *   which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
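+ *
+ * For illustration, a minimal sketch (assumes a context is current; error checking
+ * omitted): \code
+   CUstream stream;
+   unsigned long long id = 0;
+   cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+   cuStreamGetId(stream, &id);            // Id of an ordinary stream
+   cuStreamGetId(CU_STREAM_LEGACY, &id);  // the special handles are also accepted
+ * \endcode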
+ * + * \param hStream - Handle to the stream to be queried + * \param streamId - Pointer to store the Id of the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreate, + * ::cuStreamGetPriority, + * ::cudaStreamGetId + */ +CUresult CUDAAPI cuStreamGetId(CUstream hStream, unsigned long long *streamId); + +/** + * \brief Query the context associated with a stream + * + * Returns the CUDA context that the stream is associated with. + * + * The stream handle \p hStream can refer to any of the following: + *
+ * - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate
+ *   and ::cuStreamCreateWithPriority, or their runtime API equivalents such as
+ *   ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority.
+ *   The returned context is the context that was active in the calling thread when the
+ *   stream was created. Passing an invalid handle will result in undefined behavior.
+ * - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and
+ *   ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted,
+ *   which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively.
+ *   Specifying any of the special handles will return the context current to the
+ *   calling thread. If no context is current to the calling thread,
+ *   ::CUDA_ERROR_INVALID_CONTEXT is returned.
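+ *
+ * For illustration, a minimal sketch (assumes a context is current; error checking
+ * omitted): \code
+   CUstream stream;
+   CUcontext ctx;
+   cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+   cuStreamGetCtx(stream, &ctx);  // ctx is the context that was current when the stream was created
+ * \endcode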
+ * + * \param hStream - Handle to the stream to be queried + * \param pctx - Returned context associated with the stream + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * \notefnerr + * + * \sa ::cuStreamDestroy, + * ::cuStreamCreateWithPriority, + * ::cuStreamGetPriority, + * ::cuStreamGetFlags, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamCreate, + * ::cudaStreamCreateWithFlags + */ +CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); + +/** + * \brief Make a compute stream wait on an event + * + * Makes all future work submitted to \p hStream wait for all work captured in + * \p hEvent. See ::cuEventRecord() for details on what is captured by an event. + * The synchronization will be performed efficiently on the device when applicable. + * \p hEvent may be from a different context or device than \p hStream. + * + * flags include: + * - ::CU_EVENT_WAIT_DEFAULT: Default event creation flag. + * - ::CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as an external + * event node when performing stream capture. This flag is invalid outside + * of stream capture. + * + * \param hStream - Stream to wait + * \param hEvent - Event to wait on (may not be NULL) + * \param Flags - See ::CUevent_capture_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuEventRecord, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cuStreamDestroy, + * ::cudaStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); + +/** + * \brief Add a callback to a compute stream + * + * \note This function is slated for eventual deprecation and removal. If + * you do not require the callback to execute in case of a device error, + * consider using ::cuLaunchHostFunc. Additionally, this function is not + * supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike + * ::cuLaunchHostFunc. + * + * Adds a callback to be called on the host after all currently enqueued + * items in the stream have completed. For each + * cuStreamAddCallback call, the callback will be executed exactly once. + * The callback will block later work in the stream until it is finished. + * + * The callback may be passed ::CUDA_SUCCESS or an error code. In the event + * of a device error, all subsequently executed callbacks will receive an + * appropriate ::CUresult. + * + * Callbacks must not make any CUDA API calls. Attempting to use a CUDA API + * will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any + * synchronization that may depend on outstanding device work or other callbacks + * that are not mandated to run earlier. Callbacks without a mandated order + * (in independent streams) execute in undefined order and may be serialized. + * + * For the purposes of Unified Memory, callback execution makes a number of + * guarantees: + *
+ * - The callback stream is considered idle for the duration of the
+ *   callback. Thus, for example, a callback may always use memory attached
+ *   to the callback stream.
+ * - The start of execution of a callback has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the callback. It thus synchronizes streams which have been "joined"
+ *   prior to the callback.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding host functions and stream callbacks
+ *   have executed. Thus, for example, a callback might use global attached
+ *   memory even if work has been added to another stream, if the work has
+ *   been ordered behind the callback with an event.
+ * - Completion of a callback does not cause a stream to become
+ *   active except as described above. The callback stream will remain idle
+ *   if no device work follows the callback, and will remain idle across
+ *   consecutive callbacks without device work in between. Thus, for example,
+ *   stream synchronization can be done by signaling from a callback at the
+ *   end of the stream.
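+ *
+ * For illustration, a sketch of registering a hypothetical callback named onStreamDone
+ * on an existing stream (error checking omitted): \code
+   static void CUDA_CB onStreamDone(CUstream hStream, CUresult status, void *userData)
+   {
+       // Runs on a host thread once all previously enqueued work in the stream has completed.
+       // Must not call into the CUDA API.
+       (void)hStream; (void)status; (void)userData;
+   }
+
+   // ... after enqueuing asynchronous work into stream:
+   cuStreamAddCallback(stream, onStreamDone, NULL, 0);
+ * \endcode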
+ * + * \param hStream - Stream to add callback to + * \param callback - The function to call once preceding stream operations are complete + * \param userData - User specified data to be passed to the callback function + * \param flags - Reserved for future use, must be 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cuStreamAttachMemAsync, + * ::cuLaunchHostFunc, + * ::cudaStreamAddCallback + */ +CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); + +/** + * \brief Begins graph capture on a stream + * + * Begin graph capture on \p hStream. When a stream is in capture mode, all operations + * pushed into the stream will not be executed, but will instead be captured into + * a graph, which will be returned via ::cuStreamEndCapture. Capture may not be initiated + * if \p stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which + * it was initiated, and it may only be initiated if the stream is not already in capture + * mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id + * representing the capture sequence may be queried via ::cuStreamGetCaptureInfo. + * + * If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be + * called on this stream from the same thread. + * + * \param hStream - Stream in which to initiate capture + * \param mode - Controls the interaction of this capture sequence with other API + * calls that are potentially unsafe. For more details see + * ::cuThreadExchangeStreamCaptureMode. + * + * \note Kernels captured using this API must not use texture and surface references. + * Reading or writing through any texture or surface reference is undefined + * behavior. This restriction does not apply to texture and surface objects. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamIsCapturing, + * ::cuStreamEndCapture, + * ::cuThreadExchangeStreamCaptureMode + */ +CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode); + +/** + * \brief Swaps the stream capture interaction mode for a thread + * + * Sets the calling thread's stream capture interaction mode to the value contained + * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To + * facilitate deterministic behavior across function or module boundaries, callers + * are encouraged to use this API in a push-pop fashion: \code + CUstreamCaptureMode mode = desiredMode; + cuThreadExchangeStreamCaptureMode(&mode); + ... + cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode + * \endcode + * + * During stream capture (see ::cuStreamBeginCapture), some actions, such as a call + * to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is + * not enqueued asynchronously to a stream, and is not observed by stream capture. + * Therefore, if the sequence of operations captured via ::cuStreamBeginCapture + * depended on the allocation being replayed whenever the graph is launched, the + * captured graph would be invalid. 
+ * + * Therefore, stream capture places restrictions on API calls that can be made within + * or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This + * behavior can be controlled via this API and flags to ::cuStreamBeginCapture. + * + * A thread's mode is one of the following: + * - \p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has + * an ongoing capture sequence that was not initiated with + * \p CU_STREAM_CAPTURE_MODE_RELAXED at \p cuStreamBeginCapture, or if any other thread + * has a concurrent capture sequence initiated with \p CU_STREAM_CAPTURE_MODE_GLOBAL, + * this thread is prohibited from potentially unsafe API calls. + * - \p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture + * sequence not initiated with \p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited + * from potentially unsafe API calls. Concurrent capture sequences in other threads + * are ignored. + * - \p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially + * unsafe API calls. Note that the thread is still prohibited from API calls which + * necessarily conflict with stream capture, for example, attempting ::cuEventQuery + * on an event that was last recorded inside a capture sequence. + * + * \param mode - Pointer to mode value to swap with the current mode + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuStreamBeginCapture + */ +CUresult CUDAAPI cuThreadExchangeStreamCaptureMode(CUstreamCaptureMode *mode); + +/** + * \brief Ends capture on a stream, returning the captured graph + * + * End capture on \p hStream, returning the captured graph via \p phGraph. + * Capture must have been initiated on \p hStream via a call to ::cuStreamBeginCapture. + * If capture was invalidated, due to a violation of the rules of stream capture, then + * a NULL graph will be returned. + * + * If the \p mode argument to ::cuStreamBeginCapture was not + * ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as + * ::cuStreamBeginCapture. + * + * \param hStream - Stream to query + * \param phGraph - The captured graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamBeginCapture, + * ::cuStreamIsCapturing + */ +CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); + +/** + * \brief Returns a stream's capture status + * + * Return the capture status of \p hStream via \p captureStatus. After a successful + * call, \p *captureStatus will contain one of the following: + * - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing. + * - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing. + * - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error + * has invalidated the capture sequence. The capture sequence must be terminated + * with ::cuStreamEndCapture on the stream where it was initiated in order to + * continue using \p hStream. + * + * Note that, if this is called on ::CU_STREAM_LEGACY (the "null stream") while + * a blocking stream in the same context is capturing, it will return + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \p *captureStatus is unspecified + * after the call. The blocking stream capture is not invalidated. 
+ * + * When a blocking stream is capturing, the legacy stream is in an + * unusable state until the blocking stream capture is terminated. The legacy + * stream is not supported for stream capture, but attempted use would have an + * implicit dependency on the capturing stream(s). + * + * \param hStream - Stream to query + * \param captureStatus - Returns the stream's capture status + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + * \notefnerr + * + * \sa + * ::cuStreamCreate, + * ::cuStreamBeginCapture, + * ::cuStreamEndCapture + */ +CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); + +/** + * \brief Query a stream's capture state + * + * Query stream state related to stream capture. + * + * If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created + * with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. + * + * Valid data (other than capture status) is returned only if both of the following are true: + * - the call returns CUDA_SUCCESS + * - the returned capture status is ::CU_STREAM_CAPTURE_STATUS_ACTIVE + * + * \param hStream - The stream to query + * \param captureStatus_out - Location to return the capture status of the stream; required + * \param id_out - Optional location to return an id for the capture sequence, which is + * unique over the lifetime of the process + * \param graph_out - Optional location to return the graph being captured into. All + * operations other than destroy and node removal are permitted on the graph + * while the capture sequence is in progress. This API does not transfer + * ownership of the graph, which is transferred or destroyed at + * ::cuStreamEndCapture. Note that the graph handle may be invalidated before + * end of capture for certain errors. Nodes that are or become + * unreachable from the original stream at ::cuStreamEndCapture due to direct + * actions on the graph do not trigger ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED. + * \param dependencies_out - Optional location to store a pointer to an array of nodes. + * The next node to be captured in the stream will depend on this set of nodes, + * absent operations such as event wait which modify this set. The array pointer + * is valid until the next API call which operates on the stream or until end of + * capture. The node handles may be copied out and are valid until they or the + * graph is destroyed. The driver-owned array may also be passed directly to + * APIs that operate on the graph (not the stream) without copying. + * \param numDependencies_out - Optional location to store the size of the array + * returned in dependencies_out. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBeginCapture, + * ::cuStreamIsCapturing, + * ::cuStreamUpdateCaptureDependencies + */ +CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, + cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); + +/** + * \brief Update the set of dependencies in a capturing stream (11.3+) + * + * Modifies the dependency set of a capturing stream. The dependency set is the set + * of nodes that the next captured node in the stream will depend on. 
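+ *
+ * For illustration, a sketch that inserts a node directly into the graph being captured
+ * and makes it the sole dependency of subsequently captured work (assumes stream is
+ * currently capturing; the flags are described below; error checking omitted): \code
+   CUstreamCaptureStatus status;
+   CUgraph graph;
+   const CUgraphNode *deps;
+   size_t numDeps;
+   CUgraphNode node;
+   cuStreamGetCaptureInfo(stream, &status, NULL, &graph, &deps, &numDeps);
+   cuGraphAddEmptyNode(&node, graph, deps, numDeps);
+   cuStreamUpdateCaptureDependencies(stream, &node, 1, CU_STREAM_SET_CAPTURE_DEPENDENCIES);
+ * \endcode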
+ * + * Valid flags are ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES and + * ::CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to + * the API is added to the existing set or replaces it. A flags value of 0 defaults + * to ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES. + * + * Nodes that are removed from the dependency set via this API do not result in + * ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at + * ::cuStreamEndCapture. + * + * Returns ::CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing. + * + * This API is new in CUDA 11.3. Developers requiring compatibility across minor + * versions to CUDA 11.0 should not use this API or provide a fallback. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_ILLEGAL_STATE + * + * \sa + * ::cuStreamBeginCapture, + * ::cuStreamGetCaptureInfo, + */ +CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p hStream to specify stream association of + * \p length bytes of memory starting from \p dptr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p dptr must point to one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cuMemAllocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. + * + * For pageable host allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::CUmemAttach_flags. + * If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. + * If the ::CU_MEM_ATTACH_SINGLE flag is specified and \p hStream is associated with + * a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, + * the program makes a guarantee that it will only access the memory on the device + * from \p hStream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p hStream + * have completed, regardless of whether other streams are active. 
In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. + * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cuStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p hStream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cuMemAllocManaged. For __managed__ variables, the default + * association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. + * + * \param hStream - Stream in which to enqueue the attach operation + * \param dptr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * pageable memory) + * \param length - Length of memory + * \param flags - Must be one of ::CUmemAttach_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cudaStreamAttachMemAsync + */ +CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); + +/** + * \brief Determine status of a compute stream + * + * Returns ::CUDA_SUCCESS if all operations in the stream specified by + * \p hStream have completed, or ::CUDA_ERROR_NOT_READY if not. + * + * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + * is equivalent to having called ::cuStreamSynchronize(). + * + * \param hStream - Stream to query status of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_READY + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamQuery + */ +CUresult CUDAAPI cuStreamQuery(CUstream hStream); + +/** + * \brief Wait until a stream's tasks are completed + * + * Waits until the device has completed all operations in the stream specified + * by \p hStream. If the context was created with the + * ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the + * stream is finished with all of its tasks. 
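+ *
+ * For illustration, a minimal sketch (assumes a context is current and that dptr,
+ * hostBuf and numBytes describe an existing device allocation and host buffer;
+ * error checking omitted): \code
+   CUstream stream;
+   cuStreamCreate(&stream, CU_STREAM_DEFAULT);
+   cuMemcpyHtoDAsync(dptr, hostBuf, numBytes, stream);  // asynchronous with respect to the host
+   cuStreamSynchronize(stream);                         // blocks until the copy has completed
+ * \endcode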
+ * + * \param hStream - Stream to wait for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamDestroy, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamAddCallback, + * ::cudaStreamSynchronize + */ +CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); + +/** + * \brief Destroys a stream + * + * Destroys the stream specified by \p hStream. + * + * In case the device is still doing work in the stream \p hStream + * when ::cuStreamDestroy() is called, the function will return immediately + * and the resources associated with \p hStream will be released automatically + * once the device has completed all work in \p hStream. + * + * \param hStream - Stream to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamWaitEvent, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamAddCallback, + * ::cudaStreamDestroy + */ +CUresult CUDAAPI cuStreamDestroy(CUstream hStream); + +/** + * \brief Copies attributes from source stream to destination stream. + * + * Copies attributes from source stream \p src to destination stream \p dst. + * Both streams must have the same context. + * + * \param[out] dst Destination stream + * \param[in] src Source stream + * For list of attributes see ::CUstreamAttrID + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamCopyAttributes(CUstream dst, CUstream src); + +/** + * \brief Queries stream attribute. + * + * Queries attribute \p attr from \p hStream and stores it in corresponding + * member of \p value_out. + * + * \param[in] hStream + * \param[in] attr + * \param[out] value_out + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, + CUstreamAttrValue *value_out); + +/** + * \brief Sets stream attribute. + * + * Sets attribute \p attr on \p hStream from corresponding attribute of + * \p value. The updated attribute will be applied to subsequent work + * submitted to the stream. It will not affect previously submitted work. + * + * \param[out] hStream + * \param[in] attr + * \param[in] value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, + const CUstreamAttrValue *value); + +/** @} */ /* END CUDA_STREAM */ + + +/** + * \defgroup CUDA_EVENT Event Management + * + * ___MANBRIEF___ event management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the event management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Creates an event + * + * Creates an event *phEvent for the current context with the flags specified via + * \p Flags. Valid flags include: + * - ::CU_EVENT_DEFAULT: Default event creation flag. 
+ * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking + * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on + * an event created with this flag will block until the event has actually + * been recorded. + * - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need + * to record timing data. Events created with this flag specified and + * the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best + * performance when used with ::cuStreamWaitEvent() and ::cuEventQuery(). + * - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an + * interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must + * be specified along with ::CU_EVENT_DISABLE_TIMING. + * + * \param phEvent - Returns newly created event + * \param Flags - Event creation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \notefnerr + * + * \sa + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventCreate, + * ::cudaEventCreateWithFlags + */ +CUresult CUDAAPI cuEventCreate(CUevent *phEvent, unsigned int Flags); + +/** + * \brief Records an event + * + * Captures in \p hEvent the contents of \p hStream at the time of this call. + * \p hEvent and \p hStream must be from the same context. + * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p hStream after this call do not modify \p hEvent. See note on default + * stream behavior for what is captured in the default case. + * + * ::cuEventRecord() can be called multiple times on the same event and + * will overwrite the previously captured state. Other APIs such as + * ::cuStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cuEventRecord(). Before the first call to ::cuEventRecord(), an + * event represents an empty set of work, so for example ::cuEventQuery() + * would return ::CUDA_SUCCESS. + * + * \param hEvent - Event to record + * \param hStream - Stream to record event for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \note_null_stream + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuStreamWaitEvent, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventRecord, + * ::cuEventRecordWithFlags + */ +CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); + +/** + * \brief Records an event + * + * Captures in \p hEvent the contents of \p hStream at the time of this call. + * \p hEvent and \p hStream must be from the same context. + * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then + * examine or wait for completion of the work that was captured. Uses of + * \p hStream after this call do not modify \p hEvent. See note on default + * stream behavior for what is captured in the default case. + * + * ::cuEventRecordWithFlags() can be called multiple times on the same event and + * will overwrite the previously captured state. 
Other APIs such as + * ::cuStreamWaitEvent() use the most recently captured state at the time + * of the API call, and are not affected by later calls to + * ::cuEventRecordWithFlags(). Before the first call to ::cuEventRecordWithFlags(), an + * event represents an empty set of work, so for example ::cuEventQuery() + * would return ::CUDA_SUCCESS. + * + * flags include: + * - ::CU_EVENT_RECORD_DEFAULT: Default event creation flag. + * - ::CU_EVENT_RECORD_EXTERNAL: Event is captured in the graph as an external + * event node when performing stream capture. This flag is invalid outside + * of stream capture. + * + * \param hEvent - Event to record + * \param hStream - Stream to record event for + * \param flags - See ::CUevent_capture_flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \note_null_stream + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuStreamWaitEvent, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cuEventRecord, + * ::cudaEventRecord + */ +CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); + +/** + * \brief Queries an event's status + * + * Queries the status of all work currently captured by \p hEvent. See + * ::cuEventRecord() for details on what is captured by an event. + * + * Returns ::CUDA_SUCCESS if all captured work has been completed, or + * ::CUDA_ERROR_NOT_READY if any captured work is incomplete. + * + * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS + * is equivalent to having called ::cuEventSynchronize(). + * + * \param hEvent - Event to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_READY + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventQuery + */ +CUresult CUDAAPI cuEventQuery(CUevent hEvent); + +/** + * \brief Waits for an event to complete + * + * Waits until the completion of all work currently captured in \p hEvent. + * See ::cuEventRecord() for details on what is captured by an event. + * + * Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC + * flag will cause the calling CPU thread to block until the event has + * been completed by the device. If the ::CU_EVENT_BLOCKING_SYNC flag has + * not been set, then the CPU thread will busy-wait until the event has + * been completed by the device. + * + * \param hEvent - Event to wait for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventDestroy, + * ::cuEventElapsedTime, + * ::cudaEventSynchronize + */ +CUresult CUDAAPI cuEventSynchronize(CUevent hEvent); + +/** + * \brief Destroys an event + * + * Destroys the event specified by \p hEvent. + * + * An event may be destroyed before it is complete (i.e., while + * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY). In this case, the + * call does not block on completion of the event, and any associated + * resources will automatically be released asynchronously at completion. 
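+ *
+ * An illustrative sketch of this behavior (assumes \p hStream is an existing
+ * stream that still has work queued in it):
+ * \code
+     CUevent ev;
+     cuEventCreate(&ev, CU_EVENT_DISABLE_TIMING);
+     cuEventRecord(ev, hStream);
+     // Destroying before the captured work completes is legal: the call
+     // returns immediately and resources are released once the work finishes.
+     cuEventDestroy(ev);
+ * \endcode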
+ * + * \param hEvent - Event to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventElapsedTime, + * ::cudaEventDestroy + */ +CUresult CUDAAPI cuEventDestroy(CUevent hEvent); + +/** + * \brief Computes the elapsed time between two events + * + * Computes the elapsed time between two events (in milliseconds with a + * resolution of around 0.5 microseconds). + * + * If either event was last recorded in a non-NULL stream, the resulting time + * may be greater than expected (even if both used the same stream handle). This + * happens because the ::cuEventRecord() operation takes place asynchronously + * and there is no guarantee that the measured latency is actually just between + * the two events. Any number of other different stream operations could execute + * in between the two measured events, thus altering the timing in a significant + * way. + * + * If ::cuEventRecord() has not been called on either event then + * ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called + * on both events but one or both of them has not yet been completed (that is, + * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the + * events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with + * the ::CU_EVENT_DISABLE_TIMING flag, then this function will return + * ::CUDA_ERROR_INVALID_HANDLE. + * + * \param pMilliseconds - Time between \p hStart and \p hEnd in ms + * \param hStart - Starting event + * \param hEnd - Ending event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_READY, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa ::cuEventCreate, + * ::cuEventRecord, + * ::cuEventQuery, + * ::cuEventSynchronize, + * ::cuEventDestroy, + * ::cudaEventElapsedTime + */ +CUresult CUDAAPI cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd); + +/** @} */ /* END CUDA_EVENT */ + +/** + * \defgroup CUDA_EXTRES_INTEROP External Resource Interoperability + * + * ___MANBRIEF___ External resource interoperability functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the external resource interoperability functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + + /** + * \brief Imports an external memory object + * + * Imports an externally allocated memory object and returns + * a handle to that in \p extMem_out. + * + * The properties of the handle being imported must be described in + * \p memHandleDesc. The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure + * is defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { + CUexternalMemoryHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void *nvSciBufObject; + } handle; + unsigned long long size; + unsigned int flags; + } CUDA_EXTERNAL_MEMORY_HANDLE_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type + * of handle being imported. 
::CUexternalMemoryHandleType is + * defined as: + * + * \code + typedef enum CUexternalMemoryHandleType_enum { + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, + CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 + } CUexternalMemoryHandleType; + * \endcode + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a memory object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a memory object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a memory object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * be non-NULL and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * memory object are destroyed. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Heap object. This handle holds a reference to the underlying + * object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Heap object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one + * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be + * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Resource object. 
This handle holds a reference to the + * underlying object. If + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D12Resource object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * represent a valid shared NT handle that is returned by + * IDXGIResource1::CreateSharedHandle when referring to a + * ID3D11Resource object. If + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * is not NULL, then it must point to a NULL-terminated array of + * UTF-16 characters that refers to a ID3D11Resource object. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must + * represent a valid shared KMT handle that is returned by + * IDXGIResource::GetSharedHandle when referring to a + * ID3D11Resource object and + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name + * must be NULL. + * + * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL + * and reference a valid NvSciBuf object. + * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the + * application must use ::cuWaitExternalSemaphoresAsync or ::cuSignalExternalSemaphoresAsync + * as appropriate barriers to maintain coherence between CUDA and the other drivers. + * See ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC + * for memory synchronization. + * + * + * The size of the memory object must be specified in + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size. + * + * Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in + * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the + * resource is a dedicated resource. The definition of what a + * dedicated resource is outside the scope of this extension. + * This flag must be set if ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type + * is one of the following: + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT + * + * \param extMem_out - Returned handle to an external memory object + * \param memHandleDesc - Memory import handle descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the + * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges + * as well as appropriate Vulkan pipeline barriers to maintain coherence between + * CPU and GPU. For more information on these APIs, please refer to "Synchronization + * and Cache Control" chapter from Vulkan specification. 
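+ *
+ * An illustrative sketch of importing a memory object that was exported as an
+ * opaque file descriptor (\p fd and \p size are assumed to come from the
+ * exporting API, for example Vulkan):
+ * \code
+     CUDA_EXTERNAL_MEMORY_HANDLE_DESC memDesc;
+     memset(&memDesc, 0, sizeof(memDesc));
+     memDesc.type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
+     memDesc.handle.fd = fd;   // ownership of fd transfers to the CUDA driver
+     memDesc.size = size;
+
+     CUexternalMemory extMem;
+     cuImportExternalMemory(&extMem, &memDesc);
+
+     // Map the whole range as a device-accessible buffer.
+     CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufDesc;
+     memset(&bufDesc, 0, sizeof(bufDesc));
+     bufDesc.offset = 0;
+     bufDesc.size   = size;
+     CUdeviceptr dptr;
+     cuExternalMemoryGetMappedBuffer(&dptr, extMem, &bufDesc);
+ * \endcode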
+ * + * \sa ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedBuffer, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuImportExternalMemory(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc); + +/** + * \brief Maps a buffer onto an imported memory object + * + * Maps a buffer onto an imported memory object and returns a device + * pointer in \p devPtr. + * + * The properties of the buffer being mapped must be described in + * \p bufferDesc. The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is + * defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { + unsigned long long offset; + unsigned long long size; + unsigned int flags; + } CUDA_EXTERNAL_MEMORY_BUFFER_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in + * the memory object where the buffer's base address is. + * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer. + * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero. + * + * The offset and size have to be suitably aligned to match the + * requirements of the external API. Mapping two buffers whose ranges + * overlap may or may not result in the same virtual address being + * returned for the overlapped portion. In such cases, the application + * must ensure that all accesses to that region from the GPU are + * volatile. Otherwise writes made via one address are not guaranteed + * to be visible via the other address, even if they're issued by the + * same thread. It is recommended that applications map the combined + * range instead of mapping separate buffers and then apply the + * appropriate offsets to the returned pointer to derive the + * individual buffers. + * + * The returned pointer \p devPtr must be freed using ::cuMemFree. + * + * \param devPtr - Returned device pointer to buffer + * \param extMem - Handle to external memory object + * \param bufferDesc - Buffer descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuExternalMemoryGetMappedBuffer(CUdeviceptr *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc); + +/** + * \brief Maps a CUDA mipmapped array onto an external memory object + * + * Maps a CUDA mipmapped array onto an external object and returns a + * handle to it in \p mipmap. + * + * The properties of the CUDA mipmapped array being mapped must be + * described in \p mipmapDesc. The structure + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { + unsigned long long offset; + CUDA_ARRAY3D_DESCRIPTOR arrayDesc; + unsigned int numLevels; + } CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the + * offset in the memory object where the base level of the mipmap + * chain is. + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes + * the format, dimensions and type of the base level of the mipmap + * chain. For further details on these parameters, please refer to the + * documentation for ::cuMipmappedArrayCreate. 
Note that if the mipmapped + * array is bound as a color target in the graphics API, then the flag + * ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies + * the total number of levels in the mipmap chain. + * + * If \p extMem was imported from a handle of type ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then + * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels must be equal to 1. + * + * The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy. + * + * \param mipmap - Returned CUDA mipmapped array + * \param extMem - Handle to external memory object + * \param mipmapDesc - CUDA array descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuDestroyExternalMemory, + * ::cuExternalMemoryGetMappedBuffer + */ +CUresult CUDAAPI cuExternalMemoryGetMappedMipmappedArray(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc); + +/** + * \brief Destroys an external memory object. + * + * Destroys the specified external memory object. Any existing buffers + * and CUDA mipmapped arrays mapped onto this object must no longer be + * used and must be explicitly freed using ::cuMemFree and + * ::cuMipmappedArrayDestroy respectively. + * + * \param extMem - External memory object to be destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalMemory, + * ::cuExternalMemoryGetMappedBuffer, + * ::cuExternalMemoryGetMappedMipmappedArray + */ +CUresult CUDAAPI cuDestroyExternalMemory(CUexternalMemory extMem); + +/** + * \brief Imports an external semaphore + * + * Imports an externally allocated synchronization object and returns + * a handle to that in \p extSem_out. + * + * The properties of the handle being imported must be described in + * \p semHandleDesc. The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is + * defined as follows: + * + * \code + typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { + CUexternalSemaphoreHandleType type; + union { + int fd; + struct { + void *handle; + const void *name; + } win32; + const void* NvSciSyncObj; + } handle; + unsigned int flags; + } CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; + * \endcode + * + * where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of + * handle being imported. 
::CUexternalSemaphoreHandleType is defined + * as: + * + * \code + typedef enum CUexternalSemaphoreHandleType_enum { + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, + CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 + } CUexternalSemaphoreHandleType; + * \endcode + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a synchronization object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must + * be non-NULL and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * must be NULL. The handle specified must be a globally shared KMT + * handle. This handle does not hold a reference to the underlying + * object, and thus will be invalid when all references to the + * synchronization object are destroyed. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * is returned by ID3D12Device::CreateSharedHandle when referring to a + * ID3D12Fence object. This handle holds a reference to the underlying + * object. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D12Fence object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared NT handle that is returned by + * ID3D11Fence::CreateSharedHandle. 
If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid ID3D11Fence object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj + * represents a valid NvSciSyncObj. + * + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared NT handle that + * is returned by IDXGIResource1::CreateSharedHandle when referring to + * a IDXGIKeyedMutex object. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object that + * refers to a valid IDXGIKeyedMutex object. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * represents a valid shared KMT handle that + * is returned by IDXGIResource::GetSharedHandle when referring to + * a IDXGIKeyedMutex object and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid + * file descriptor referencing a synchronization object. Ownership of + * the file descriptor is transferred to the CUDA driver when the + * handle is imported successfully. Performing any operations on the + * file descriptor after it is imported results in undefined behavior. + * + * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one + * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be + * NULL. If + * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle + * is not NULL, then it must represent a valid shared NT handle that + * references a synchronization object. Ownership of this handle is + * not transferred to CUDA after the import operation, so the + * application must release the handle using the appropriate system + * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name + * is not NULL, then it must name a valid synchronization object. + * + * \param extSem_out - Returned handle to an external semaphore + * \param semHandleDesc - Semaphore import handle descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OPERATING_SYSTEM + * \notefnerr + * + * \sa ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuImportExternalSemaphore(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc); + +/** + * \brief Signals a set of external semaphore objects + * + * Enqueues a signal operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of signaling a semaphore depends on the type of + * the object. 
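+ *
+ * The call pattern is the same for every type; a minimal illustrative sketch
+ * for a single fence-type semaphore (\p extSem and \p hStream are assumed to
+ * exist, and the fence value is arbitrary):
+ * \code
+     CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS sigParams;
+     memset(&sigParams, 0, sizeof(sigParams));
+     sigParams.params.fence.value = 2;   // only consulted for fence/timeline types
+     cuSignalExternalSemaphoresAsync(&extSem, &sigParams, 1, hStream);
+ * \endcode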
+ * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * then signaling the semaphore will set it to the signaled state. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * then the semaphore will be set to the value specified in + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value. + * + * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + * this API sets ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence + * to a value that can be used by subsequent waiters of the same NvSciSync object + * to order operations with those currently submitted in \p stream. Such an update + * will overwrite previous contents of + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, + * signaling such an external semaphore object causes appropriate memory synchronization + * operations to be performed over all external memory objects that are imported as + * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses + * made by other importers of the same set of NvSciBuf memory object(s) are coherent. + * These operations can be skipped by specifying the flag + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. + * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return + * CUDA_ERROR_NOT_SUPPORTED. + * NvSciSyncFence associated with semaphore object of the type + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC can be deterministic. For this the + * NvSciSyncAttrList used to create the semaphore object must have value of + * NvSciSyncAttrKey_RequireDeterministicFences key set to true. Deterministic fences + * allow users to enqueue a wait over the semaphore object even before corresponding + * signal is enqueued. For such a semaphore object, CUDA guarantees that each signal + * operation will increment the fence value by '1'. Users are expected to track count + * of signals enqueued on the semaphore object and insert waits accordingly. When such + * a semaphore object is signaled from multiple streams, due to concurrent stream + * execution, it is possible that the order in which the semaphore gets signaled is + * indeterministic. This could lead to waiters of the semaphore getting unblocked + * incorrectly. Users are expected to handle such situations, either by not using the + * same semaphore object with deterministic fence support enabled in different streams + * or by adding explicit dependency amongst such streams so that the semaphore is + * signaled in order. 
+ * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then the keyed mutex will be released with the key specified in + * ::CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key. + * + * \param extSemArray - Set of external semaphores to be signaled + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to signal + * \param stream - Stream to enqueue the signal operations in + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + +/** + * \brief Waits on a set of external semaphore objects + * + * Enqueues a wait operation on a set of externally allocated + * semaphore object in the specified stream. The operations will be + * executed when all prior operations in the stream complete. + * + * The exact semantics of waiting on a semaphore depends on the type + * of the object. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT + * then waiting on the semaphore will wait until the semaphore reaches + * the signaled state. The semaphore will then be reset to the + * unsignaled state. Therefore for every signal operation, there can + * only be one wait operation. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 + * then waiting on the semaphore will wait until the value of the + * semaphore is greater than or equal to + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value. + * + * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC + * then, waiting on the semaphore will wait until the + * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the + * signaler of the NvSciSyncObj that was associated with this semaphore object. + * By default, waiting on such an external semaphore object causes appropriate + * memory synchronization operations to be performed over all external memory objects + * that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that + * any subsequent accesses made by other importers of the same set of NvSciBuf memory + * object(s) are coherent. These operations can be skipped by specifying the flag + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a + * performance optimization when data coherency is not required. But specifying this + * flag in scenarios where data coherency is required results in undefined behavior. 
+ * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, + * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in + * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return + * CUDA_ERROR_NOT_SUPPORTED. + * + * If the semaphore object is any one of the following types: + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, + * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT + * then the keyed mutex will be acquired when it is released with the key + * specified in ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key + * or until the timeout specified by + * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs + * has lapsed. The timeout interval can either be a finite value + * specified in milliseconds or an infinite value. In case an infinite + * value is specified the timeout never elapses. The windows INFINITE + * macro must be used to specify infinite timeout. + * + * \param extSemArray - External semaphores to be waited on + * \param paramsArray - Array of semaphore parameters + * \param numExtSems - Number of semaphores to wait on + * \param stream - Stream to enqueue the wait operations in + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_TIMEOUT + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuDestroyExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync + */ +CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + +/** + * \brief Destroys an external semaphore + * + * Destroys an external semaphore object and releases any references + * to the underlying resource. Any outstanding signals or waits must + * have completed before the semaphore is destroyed. + * + * \param extSem - External semaphore to be destroyed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuDestroyExternalSemaphore(CUexternalSemaphore extSem); + +/** @} */ /* END CUDA_EXTRES_INTEROP */ + +/** + * \defgroup CUDA_MEMOP Stream Memory Operations + * + * ___MANBRIEF___ Stream memory operations of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the stream memory operations of the low-level CUDA + * driver application programming interface. + * + * Support for the ::CU_STREAM_WAIT_VALUE_NOR flag can be queried with + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + * + * Support for the ::cuStreamWriteValue64() and ::cuStreamWaitValue64() + * functions, as well as for the ::CU_STREAM_MEM_OP_WAIT_VALUE_64 and + * ::CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * Support for both ::CU_STREAM_WAIT_VALUE_FLUSH and + * ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform + * hardware features and can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. + * + * Note that all memory pointers passed as parameters to these operations + * are device pointers. 
Where necessary a device pointer should be + * obtained, for example with ::cuMemHostGetDevicePointer(). + * + * None of the operations accepts pointers to managed memory buffers + * (::cuMemAllocManaged). + * + * \note + * Warning: + * Improper use of these APIs may deadlock the application. Synchronization + * ordering established through these APIs is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by these APIs should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * @{ + */ + +/** + * \brief Wait on a memory location + * + * Enqueues a synchronization of the stream on the given memory location. Work + * ordered after the operation will block until the given condition on the + * memory is satisfied. By default, the condition is to wait for + * (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. + * Other condition types can be specified via \p flags. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + * be used with managed memory (::cuMemAllocManaged). + * + * Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V2. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * \param stream The stream to synchronize on the memory location. + * \param addr The memory location to wait on. + * \param value The value to compare with the memory location. + * \param flags See ::CUstreamWaitValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue64, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + +/** + * \brief Wait on a memory location + * + * Enqueues a synchronization of the stream on the given memory location. Work + * ordered after the operation will block until the given condition on the + * memory is satisfied. By default, the condition is to wait for + * (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. + * Other condition types can be specified via \p flags. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). + * + * Support for this can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. + * + * \param stream The stream to synchronize on the memory location. 
+ * \param addr The memory location to wait on. + * \param value The value to compare with the memory location. + * \param flags See ::CUstreamWaitValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue32, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + +/** + * \brief Write a value to memory + * + * Write a value to memory. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot + * be used with managed memory (::cuMemAllocManaged). + * + * \param stream The stream to do the write in. + * \param addr The device address to write to. + * \param value The value to write. + * \param flags See ::CUstreamWriteValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWriteValue64, + * ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuEventRecord + */ +CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + +/** + * \brief Write a value to memory + * + * Write a value to memory. + * + * If the memory was registered via ::cuMemHostRegister(), the device pointer + * should be obtained with ::cuMemHostGetDevicePointer(). + * + * Support for this can be queried with ::cuDeviceGetAttribute() and + * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. + * + * \param stream The stream to do the write in. + * \param addr The device address to write to. + * \param value The value to write. + * \param flags See ::CUstreamWriteValue_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWriteValue32, + * ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamBatchMemOp, + * ::cuMemHostRegister, + * ::cuEventRecord + */ +CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + +/** + * \brief Batch operations to synchronize the stream via memory operations + * + * This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32(). + * Batching operations may avoid some performance overhead in both the API call + * and the device execution versus adding them to the stream in separate API + * calls. The operations are enqueued in the order they appear in the array. + * + * See ::CUstreamBatchMemOpType for the full set of supported operations, and + * ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(), + * and ::cuStreamWriteValue64() for details of specific operations. + * + * See related APIs for details on querying support for specific operations. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. 
For more + * information, see the Stream Memory Operations section in the programming + * guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). + * + * \param stream The stream to enqueue the operations in. + * \param count The number of operations in the array. Must be less than 256. + * \param paramArray The types and parameters of the individual operations. + * \param flags Reserved for future expansion; must be 0. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \notefnerr + * + * \sa ::cuStreamWaitValue32, + * ::cuStreamWaitValue64, + * ::cuStreamWriteValue32, + * ::cuStreamWriteValue64, + * ::cuMemHostRegister + */ +CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + +/** @} */ /* END CUDA_MEMOP */ + +/** + * \defgroup CUDA_EXEC Execution Control + * + * ___MANBRIEF___ execution control functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the execution control functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns information about a function + * + * Returns in \p *pi the integer value of the attribute \p attrib on the kernel + * given by \p hfunc. The supported attributes are: + * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads + * per block, beyond which a launch of the function would fail. This number + * depends on both the function and the device on which the function is + * currently loaded. + * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of + * statically-allocated shared memory per block required by this function. + * This does not include dynamically-allocated shared memory requested by + * the user at runtime. + * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated + * constant memory required by this function. + * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory + * used by each thread of this function. + * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread + * of this function. + * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for + * which the function was compiled. This value is the major PTX version * 10 + * + the minor PTX version, so a PTX version 1.3 function would return the + * value 13. Note that this may return the undefined value of 0 for cubins + * compiled prior to CUDA 3.0. + * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for + * which the function was compiled. This value is the major binary + * version * 10 + the minor binary version, so a binary version 1.3 function + * would return the value 13. Note that this will return a value of 10 for + * legacy cubins that do not have a properly-encoded binary architecture + * version. + * - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has + * been compiled with user specified option "-Xptxas --dlcm=ca" set . + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of + * dynamically-allocated shared memory. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 + * cache split ratio in percent of total shared memory. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SIZE_MUST_BE_SET: If this attribute is set, the + * kernel must launch with a valid cluster size specified. 
+ * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. + * - ::CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED: Indicates whether + * the function can be launched with non-portable cluster size. 1 is allowed, + * 0 is disallowed. A non-portable cluster size may only function on the + * specific SKUs the program is tested on. The launch might fail if the + * program is run on a different hardware platform. CUDA API provides + * cudaOccupancyMaxActiveClusters to assist with checking whether the desired + * size can be launched on the current device. A portable cluster size is + * guaranteed to be functional on all compute capabilities higher than the + * target compute capability. The portable cluster size for sm_90 is 8 blocks + * per cluster. This value may increase for future compute capabilities. The + * specific hardware unit may support higher cluster sizes that’s not + * guaranteed to be portable. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \param pi - Returned attribute value + * \param attrib - Attribute requested + * \param hfunc - Function to query attribute of + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuLaunchKernel, + * ::cudaFuncGetAttributes, + * ::cudaFuncSetAttribute, + * ::cuKernelGetAttribute + */ +CUresult CUDAAPI cuFuncGetAttribute(int *pi, CUfunction_attribute attrib, CUfunction hfunc); + +/** + * \brief Sets information about a function + * + * This call sets the value of a specified attribute \p attrib on the kernel given + * by \p hfunc to an integer value specified by \p val + * This function returns CUDA_SUCCESS if the new value of the attribute could be + * successfully set. If the set fails, this call will return an error. + * Not all attributes can have values set. Attempting to set a value on a read-only + * attribute will result in an error (CUDA_ERROR_INVALID_VALUE) + * + * Supported attributes for the cuFuncSetAttribute call are: + * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This maximum size in bytes of + * dynamically-allocated shared memory. The value should contain the requested + * maximum size of dynamically-allocated shared memory. The sum of this value and + * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the + * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. + * The maximal size of requestable dynamic shared memory may differ by GPU + * architecture. + * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 + * cache and shared memory use the same hardware resources, this sets the shared memory + * carveout preference, in percent of the total shared memory. + * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_WIDTH: The required cluster width in + * blocks. 
The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_HEIGHT: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_REQUIRED_CLUSTER_DEPTH: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return CUDA_ERROR_NOT_PERMITTED. + * - ::CU_FUNC_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE: The block + * scheduling policy of a function. The value type is CUclusterSchedulingPolicy. + * + * \param hfunc - Function to query attribute of + * \param attrib - Attribute requested + * \param value - The value to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuLaunchKernel, + * ::cudaFuncGetAttributes, + * ::cudaFuncSetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value); + +/** + * \brief Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p config the preferred cache configuration for + * the device function \p hfunc. This is only a preference. The driver will use + * the requested configuration if possible, but it is free to choose a different + * configuration if required to execute \p hfunc. Any context-wide preference + * set via ::cuCtxSetCacheConfig() will be overridden by this per-function + * setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. In + * that case, the current context-wide setting will be used. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
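+ *
+ * An illustrative sketch (\p hKernel is assumed to be a ::CUfunction obtained
+ * via ::cuModuleGetFunction):
+ * \code
+     // Ask for a larger shared-memory carveout for this kernel; the driver
+     // may still pick a different configuration if it has to.
+     cuFuncSetCacheConfig(hKernel, CU_FUNC_CACHE_PREFER_SHARED);
+ * \endcode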
+ * + * + * The supported cache configurations are: + * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) + * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache + * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory + * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory + * + * \param hfunc - Kernel to configure cache for + * \param config - Requested cache configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchKernel, + * ::cudaFuncSetCacheConfig, + * ::cuKernelSetCacheConfig + */ +CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config); + +/** + * \brief Sets the shared memory configuration for a device function. + * + * On devices with configurable shared memory banks, this function will + * force all subsequent launches of the specified device function to have + * the given shared memory bank size configuration. On any given launch of the + * function, the shared memory configuration of the device will be temporarily + * changed if needed to suit the function's preferred configuration. Changes in + * shared memory configuration between subsequent launches of functions, + * may introduce a device side synchronization point. + * + * Any per-function setting of shared memory bank size set via + * ::cuFuncSetSharedMemConfig will override the context wide setting set with + * ::cuCtxSetSharedMemConfig. + * + * Changing the shared memory bank size will not increase shared memory usage + * or affect occupancy of kernels, but may have major effects on performance. + * Larger bank sizes will allow for greater potential bandwidth to shared memory, + * but will change what kinds of accesses to shared memory will result in bank + * conflicts. + * + * This function will do nothing on devices with fixed shared memory bank size. + * + * The supported bank configurations are: + * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory + * configuration when launching this function. + * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to + * be natively four bytes when launching this function. + * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to + * be natively eight bytes when launching this function. + * + * \param hfunc - kernel to be given a shared memory config + * \param config - requested shared memory configuration + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuCtxGetSharedMemConfig, + * ::cuCtxSetSharedMemConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchKernel, + * ::cudaFuncSetSharedMemConfig + */ +CUresult CUDAAPI cuFuncSetSharedMemConfig(CUfunction hfunc, CUsharedconfig config); + +/** + * \brief Returns a module handle + * + * Returns in \p *hmod the handle of the module that function \p hfunc + * is located in. The lifetime of the module corresponds to the lifetime of + * the context it was loaded in or until the module is explicitly unloaded. + * + * The CUDA runtime manages its own modules loaded into the primary context. 
+ * If the handle returned by this API refers to a module loaded by the CUDA runtime, + * calling ::cuModuleUnload() on that module will result in undefined behavior. + * + * \param hmod - Returned module handle + * \param hfunc - Function to retrieve module for + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_FOUND + * \notefnerr + * + */ +CUresult CUDAAPI cuFuncGetModule(CUmodule *hmod, CUfunction hfunc); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f + * on a \p gridDimX x \p gridDimY x \p gridDimZ grid of blocks. + * Each block contains \p blockDimX x \p blockDimY x + * \p blockDimZ threads. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p f can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If \p f + * has N parameters, then \p kernelParams needs to be an array of N + * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] + * must point to a region of memory from which the actual kernel + * parameter will be copied. The number of kernel parameters and their + * offsets and sizes do not need to be specified as that information is + * retrieved directly from the kernel's image. + * + * 2) Kernel parameters can also be packaged by the application into + * a single buffer that is passed in via the \p extra parameter. + * This places the burden on the application of knowing each kernel + * parameter's size and alignment/padding within the buffer. Here is + * an example of using the \p extra parameter in this manner: + * \code + size_t argBufferSize; + char argBuffer[256]; + + // populate argBuffer and argBufferSize + + void *config[] = { + CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer, + CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize, + CU_LAUNCH_PARAM_END + }; + status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config); + * \endcode + * + * The \p extra parameter exists to allow ::cuLaunchKernel to take + * additional less commonly used arguments. \p extra specifies a list of + * names of extra settings and their corresponding values. Each extra + * setting name is immediately followed by the corresponding value. The + * list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer containing all + * the kernel parameters for launching kernel \p f; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t containing the + * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel + * parameters are specified with both \p kernelParams and \p extra + * (i.e. both \p kernelParams and \p extra are non-NULL). + * + * Calling ::cuLaunchKernel() invalidates the persistent function state + * set through the following deprecated APIs: + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), + * ::cuParamSetv(). 
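+ *
+ * As an illustrative sketch of the \p kernelParams path (the kernel handle \p f,
+ * the device pointer and the element count below are assumptions for the example):
+ * \code
+   int n = 1024;                     // hypothetical element count
+   CUdeviceptr dData;                // assumed to hold a valid device allocation
+   void *params[] = { &dData, &n };  // one pointer per kernel parameter
+   // 256 threads per block, enough blocks to cover n elements,
+   // no dynamic shared memory, default stream, no extra options
+   status = cuLaunchKernel(f, (n + 255) / 256, 1, 1,
+                           256, 1, 1, 0, NULL, params, NULL);
+ * \endcode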
+ * + * Note that to use ::cuLaunchKernel(), the kernel \p f must either have + * been compiled with toolchain version 3.2 or later so that it will + * contain kernel parameter information, or have no kernel parameters. + * If either of these conditions is not met, then ::cuLaunchKernel() will + * return ::CUDA_ERROR_INVALID_IMAGE. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream \p hStream + * or the current context in case of NULL stream. + * + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param gridDimX - Width of grid in blocks + * \param gridDimY - Height of grid in blocks + * \param gridDimZ - Depth of grid in blocks + * \param blockDimX - X dimension of each thread block + * \param blockDimY - Y dimension of each thread block + * \param blockDimZ - Z dimension of each thread block + * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + * \param hStream - Stream identifier + * \param kernelParams - Array of pointers to kernel parameters + * \param extra - Extra options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cudaLaunchKernel, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchKernel(CUfunction f, + unsigned int gridDimX, + unsigned int gridDimY, + unsigned int gridDimZ, + unsigned int blockDimX, + unsigned int blockDimY, + unsigned int blockDimZ, + unsigned int sharedMemBytes, + CUstream hStream, + void **kernelParams, + void **extra); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel with launch-time configuration + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f with the specified launch-time configuration + * \p config. + * + * The ::CUlaunchConfig structure is defined as: + * \code + typedef struct CUlaunchConfig_st { + unsigned int gridDimX; + unsigned int gridDimY; + unsigned int gridDimZ; + unsigned int blockDimX; + unsigned int blockDimY; + unsigned int blockDimZ; + unsigned int sharedMemBytes; + CUstream hStream; + CUlaunchAttribute *attrs; + unsigned int numAttrs; + } CUlaunchConfig; + * \endcode + * where: + * - ::CUlaunchConfig::gridDimX is the width of the grid in blocks. + * - ::CUlaunchConfig::gridDimY is the height of the grid in blocks. + * - ::CUlaunchConfig::gridDimZ is the depth of the grid in blocks. + * - ::CUlaunchConfig::blockDimX is the X dimension of each thread block. + * - ::CUlaunchConfig::blockDimX is the Y dimension of each thread block. + * - ::CUlaunchConfig::blockDimZ is the Z dimension of each thread block. + * - ::CUlaunchConfig::sharedMemBytes is the dynamic shared-memory size per + * thread block in bytes. 
+ * - ::CUlaunchConfig::hStream is the handle to the stream to perform the launch + * in. The CUDA context associated with this stream must match that associated + * with function f. + * - ::CUlaunchConfig::attrs is an array of ::CUlaunchConfig::numAttrs + * continguous ::CUlaunchAttribute elements. The value of this pointer is not + * considered if ::CUlaunchConfig::numAttrs is zero. However, in that case, it + * is recommended to set the pointer to NULL. + * - ::CUlaunchConfig::numAttrs is the numbers of attributes populating the + * first ::CUlaunchConfig::numAttrs positions of the ::CUlaunchConfig::attrs + * array. + * + * Launch-time configuration is specified by adding entries to + * ::CUlaunchConfig::attrs. Each entry is an attribute ID and a corresponding + * attribute value. + * + * The ::CUlaunchAttribute structure is defined as: + * \code + typedef struct CUlaunchAttribute_st { + CUlaunchAttributeID id; + CUlaunchAttributeValue value; + } CUlaunchAttribute; + * \endcode + * where: + * - ::CUlaunchAttribute::id is a unique enum identifying the attribute. + * - ::CUlaunchAttribute::value is a union that hold the attribute value. + * + * An example of using the \p config parameter: + * \code + CUlaunchAttribute coopAttr = {.id = CU_LAUNCH_ATTRIBUTE_COOPERATIVE, + .value = 1}; + CUlaunchConfig config = {... // set block and grid dimensions + .attrs = &coopAttr, + .numAttrs = 1}; + + cuLaunchKernelEx(&config, kernel, NULL, NULL); + * \endcode + * + * The ::CUlaunchAttributeID enum is defined as: + * \code + typedef enum CUlaunchAttributeID_enum { + CU_LAUNCH_ATTRIBUTE_IGNORE = 0, + CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, + CU_LAUNCH_ATTRIBUTE_COOPERATIVE = 2, + CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3, + CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION = 4, + CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE = 5, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION = 6, + CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT = 7, + } CUlaunchAttributeID; + * \endcode + * + * and the corresponding ::CUlaunchAttributeValue union as : + * \code + typedef union CUlaunchAttributeValue_union { + cuuint64_t pad[8]; + CUaccessPolicyWindow accessPolicyWindow; + int cooperative; + CUsynchronizationPolicy syncPolicy; + struct { + unsigned int x; + unsigned int y; + unsigned int z; + } clusterDim; + CUclusterSchedulingPolicy clusterSchedulingPolicyPreference; + int programmaticStreamSerializationAllowed; + struct { + CUevent event; + int flags; + int triggerAtBlockStart; + } programmaticEvent; + } CUlaunchAttributeValue; + * \endcode + * + * Setting ::CU_LAUNCH_ATTRIBUTE_COOPERATIVE to a non-zero value causes the + * kernel launch to be a cooperative launch, with exactly the same usage and + * semantics of ::cuLaunchCooperativeKernel. + * + * Setting ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_STREAM_SERIALIZATION to a non-zero + * values causes the kernel to use programmatic means to resolve its stream + * dependency -- enabling the CUDA runtime to opportunistically allow the grid's + * execution to overlap with the previous kernel in the stream, if that kernel + * requests the overlap. + * + * ::CU_LAUNCH_ATTRIBUTE_PROGRAMMATIC_EVENT records an event along with the + * kernel launch. Event recorded through this launch attribute is guaranteed to + * only trigger after all block in the associated kernel trigger the event. A + * block can trigger the event through PTX launchdep.release or CUDA builtin + * function cudaTriggerProgrammaticLaunchCompletion(). 
A trigger can also be + * inserted at the beginning of each block's execution if triggerAtBlockStart is + * set to non-0. Note that dependents (including the CPU thread calling + * cuEventSynchronize()) are not guaranteed to observe the release precisely + * when it is released. For example, cuEventSynchronize() may only observe the + * event trigger long after the associated kernel has completed. This recording + * type is primarily meant for establishing programmatic dependency between + * device tasks. The event supplied must not be an interprocess or interop + * event. The event must disable timing (i.e. created with + * ::CU_EVENT_DISABLE_TIMING flag set). + * + * The effect of other attributes is consistent with their effect when set via + * persistent APIs. + * + * See ::cuStreamSetAttribute for + * - ::CU_LAUNCH_ATTRIBUTE_ACCESS_POLICY_WINDOW + * - ::CU_LAUNCH_ATTRIBUTE_SYNCHRONIZATION_POLICY + * + * See ::cuFunctionSetAttribute for + * - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION + * - ::CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE + * + * Kernel parameters to \p f can be specified in the same ways that they can be + * using ::cuLaunchKernel. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream ::CUlaunchConfig::hStream + * or the current context in case of NULL stream. + * + * \param config - Config to launch + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param kernelParams - Array of pointers to kernel parameters + * \param extra - Extra options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cudaLaunchKernel, + * ::cudaLaunchKernelEx, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchKernelEx(const CUlaunchConfig *config, + CUfunction f, + void **kernelParams, + void **extra); + +/** + * \brief Launches a CUDA function ::CUfunction or a CUDA kernel ::CUkernel where thread blocks + * can cooperate and synchronize as they execute + * + * Invokes the function ::CUfunction or the kernel ::CUkernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ + * grid of blocks. Each block contains \p blockDimX x \p blockDimY x + * \p blockDimZ threads. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream \p hStream + * or the current context in case of NULL stream. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. 
+ * + * The device on which this kernel is invoked must have a non-zero value for + * the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH. + * + * The total number of blocks launched cannot exceed the maximum number of blocks per + * multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or + * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. + * + * The kernel cannot make use of CUDA dynamic parallelism. + * + * Kernel parameters must be specified via \p kernelParams. If \p f + * has N parameters, then \p kernelParams needs to be an array of N + * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] + * must point to a region of memory from which the actual kernel + * parameter will be copied. The number of kernel parameters and their + * offsets and sizes do not need to be specified as that information is + * retrieved directly from the kernel's image. + * + * Calling ::cuLaunchCooperativeKernel() sets persistent function state that is + * the same as function state set through ::cuLaunchKernel API + * + * When the kernel \p f is launched via ::cuLaunchCooperativeKernel(), the previous + * block shape, shared size and parameter info associated with \p f + * is overwritten. + * + * Note that to use ::cuLaunchCooperativeKernel(), the kernel \p f must either have + * been compiled with toolchain version 3.2 or later so that it will + * contain kernel parameter information, or have no kernel parameters. + * If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will + * return ::CUDA_ERROR_INVALID_IMAGE. + * + * Note that the API can also be used to launch context-less kernel ::CUkernel + * by querying the handle using ::cuLibraryGetKernel() and then passing it + * to the API by casting to ::CUfunction. Here, the context to launch + * the kernel on will either be taken from the specified stream \p hStream + * or the current context in case of NULL stream. 
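+ *
+ * A minimal sketch of a cooperative launch (the device \p dev, kernel handle \p f,
+ * block size and kernel argument are assumptions; error handling omitted):
+ * \code
+   int supported = 0, numBlocksPerSm = 0, smCount = 0;
+   cuDeviceGetAttribute(&supported, CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH, dev);
+   cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
+   // Size the grid so that every block can be resident at once
+   cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, f, 256, 0);
+   void *params[] = { &arg };
+   if (supported)
+       cuLaunchCooperativeKernel(f, numBlocksPerSm * smCount, 1, 1,
+                                 256, 1, 1, 0, NULL, params);
+ * \endcode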
+ * + * \param f - Function ::CUfunction or Kernel ::CUkernel to launch + * \param gridDimX - Width of grid in blocks + * \param gridDimY - Height of grid in blocks + * \param gridDimZ - Depth of grid in blocks + * \param blockDimX - X dimension of each thread block + * \param blockDimY - Y dimension of each thread block + * \param blockDimZ - Z dimension of each thread block + * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes + * \param hStream - Stream identifier + * \param kernelParams - Array of pointers to kernel parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, + * ::CUDA_ERROR_NOT_FOUND + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchCooperativeKernelMultiDevice, + * ::cudaLaunchCooperativeKernel, + * ::cuLibraryGetKernel, + * ::cuKernelSetCacheConfig, + * ::cuKernelGetAttribute, + * ::cuKernelSetAttribute + */ +CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, + unsigned int gridDimX, + unsigned int gridDimY, + unsigned int gridDimZ, + unsigned int blockDimX, + unsigned int blockDimY, + unsigned int blockDimZ, + unsigned int sharedMemBytes, + CUstream hStream, + void **kernelParams); + +/** + * \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute + * + * \deprecated This function is deprecated as of CUDA 11.3. + * + * Invokes kernels as specified in the \p launchParamsList array where each element + * of the array specifies all the parameters required to perform a single kernel launch. + * These kernels can cooperate and synchronize as they execute. The size of the array is + * specified by \p numDevices. + * + * No two kernels can be launched on the same device. All the devices targeted by this + * multi-device launch must be identical. All devices must have a non-zero value for the + * device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH. + * + * All kernels launched must be identical with respect to the compiled code. Note that + * any __device__, __constant__ or __managed__ variables present in the module that owns + * the kernel launched on each device, are independently instantiated on every device. + * It is the application's responsibility to ensure these variables are initialized and + * used appropriately. + * + * The size of the grids as specified in blocks, the size of the blocks themselves + * and the amount of shared memory used by each thread block must also match across + * all launched kernels. + * + * The streams used to launch these kernels must have been created via either ::cuStreamCreate + * or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD + * cannot be used. 
+ *
+ * The total number of blocks launched per kernel cannot exceed the maximum number of blocks
+ * per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or
+ * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors
+ * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the
+ * total number of blocks launched per device has to match across all devices, the maximum
+ * number of blocks that can be launched per device will be limited by the device with the
+ * least number of multiprocessors.
+ *
+ * The kernels cannot make use of CUDA dynamic parallelism.
+ *
+ * The ::CUDA_LAUNCH_PARAMS structure is defined as:
+ * \code
+        typedef struct CUDA_LAUNCH_PARAMS_st
+        {
+            CUfunction function;
+            unsigned int gridDimX;
+            unsigned int gridDimY;
+            unsigned int gridDimZ;
+            unsigned int blockDimX;
+            unsigned int blockDimY;
+            unsigned int blockDimZ;
+            unsigned int sharedMemBytes;
+            CUstream hStream;
+            void **kernelParams;
+        } CUDA_LAUNCH_PARAMS;
+ * \endcode
+ * where:
+ * - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must
+ *   be identical with respect to the compiled code.
+ *   Note that you can also specify a context-less kernel ::CUkernel by querying the handle
+ *   using ::cuLibraryGetKernel() and then casting to ::CUfunction. In this case, the context to
+ *   launch the kernel on will be taken from the specified stream ::CUDA_LAUNCH_PARAMS::hStream.
+ * - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::blockDimY is the Y dimension of each thread block. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across
+ *   all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes.
+ *   This must match across all kernels launched.
+ * - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot
+ *   be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated
+ *   with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function.
+ * - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If
+ *   ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams
+ *   needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through
+ *   ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual
+ *   kernel parameter will be copied. The number of kernel parameters and their offsets and sizes
+ *   do not need to be specified as that information is retrieved directly from the kernel's image.
+ *
+ * By default, the kernel won't begin execution on any GPU until all prior work in all the specified
+ * streams has completed. This behavior can be overridden by specifying the flag
+ * ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC.
When this flag is specified, each kernel + * will only wait for prior work in the stream corresponding to that GPU to complete before it begins + * execution. + * + * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin + * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying + * the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, + * any subsequent work pushed in any of the specified streams will only wait for the kernel launched + * on the GPU corresponding to that stream to complete before it begins execution. + * + * Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is + * the same as function state set through ::cuLaunchKernel API when called individually for each + * element in \p launchParamsList. + * + * When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous + * block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function + * in \p launchParamsList is overwritten. + * + * Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have + * been compiled with toolchain version 3.2 or later so that it will + * contain kernel parameter information, or have no kernel parameters. + * If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will + * return ::CUDA_ERROR_INVALID_IMAGE. + * + * \param launchParamsList - List of launch parameters, one per device + * \param numDevices - Size of the \p launchParamsList array + * \param flags - Flags to control launch behavior + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_IMAGE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \note_null_stream + * \notefnerr + * + * \sa ::cuCtxGetCacheConfig, + * ::cuCtxSetCacheConfig, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuLaunchCooperativeKernel, + * ::cudaLaunchCooperativeKernelMultiDevice + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchCooperativeKernelMultiDevice(CUDA_LAUNCH_PARAMS *launchParamsList, unsigned int numDevices, unsigned int flags); + +/** + * \brief Enqueues a host function call in a stream + * + * Enqueues a host function to run in a stream. The function will be called + * after currently enqueued work and will block work added after it. + * + * The host function must not make any CUDA API calls. Attempting to use a + * CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required. + * The host function must not perform any synchronization that may depend on + * outstanding CUDA work not mandated to run earlier. Host functions without a + * mandated order (such as in independent streams) execute in undefined order + * and may be serialized. + * + * For the purposes of Unified Memory, execution makes a number of guarantees: + *
+ * - The stream is considered idle for the duration of the function's
+ *   execution. Thus, for example, the function may always use memory attached
+ *   to the stream it was enqueued in.
+ * - The start of execution of the function has the same effect as
+ *   synchronizing an event recorded in the same stream immediately prior to
+ *   the function. It thus synchronizes streams which have been "joined"
+ *   prior to the function.
+ * - Adding device work to any stream does not have the effect of making
+ *   the stream active until all preceding host functions and stream callbacks
+ *   have executed. Thus, for example, a function might use global attached
+ *   memory even if work has been added to another stream, if the work has
+ *   been ordered behind the function call with an event.
+ * - Completion of the function does not cause a stream to become
+ *   active except as described above. The stream will remain idle
+ *   if no device work follows the function, and will remain idle across
+ *   consecutive host functions or stream callbacks without device work in
+ *   between. Thus, for example, stream synchronization can be done by
+ *   signaling from a host function at the end of the stream.
+ * + * Note that, in contrast to ::cuStreamAddCallback, the function will not be + * called in the event of an error in the CUDA context. + * + * \param hStream - Stream to enqueue function call in + * \param fn - The function to call once preceding stream operations are complete + * \param userData - User-specified data to be passed to the function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_SUPPORTED + * \note_null_stream + * \notefnerr + * + * \sa ::cuStreamCreate, + * ::cuStreamQuery, + * ::cuStreamSynchronize, + * ::cuStreamWaitEvent, + * ::cuStreamDestroy, + * ::cuMemAllocManaged, + * ::cuStreamAttachMemAsync, + * ::cuStreamAddCallback + */ +CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); + +/** @} */ /* END CUDA_EXEC */ + +/** + * \defgroup CUDA_EXEC_DEPRECATED Execution Control [DEPRECATED] + * + * ___MANBRIEF___ deprecated execution control functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated execution control functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Sets the block-dimensions for the function + * + * \deprecated + * + * Specifies the \p x, \p y, and \p z dimensions of the thread blocks that are + * created when the kernel given by \p hfunc is launched. + * + * \param hfunc - Kernel to specify dimensions of + * \param x - X dimension + * \param y - Y dimension + * \param z - Z dimension + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetSharedSize, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetBlockShape(CUfunction hfunc, int x, int y, int z); + +/** + * \brief Sets the dynamic shared-memory size for the function + * + * \deprecated + * + * Sets through \p bytes the amount of dynamic shared memory that will be + * available to each thread block when the kernel given by \p hfunc is launched. + * + * \param hfunc - Kernel to specify dynamic shared-memory size for + * \param bytes - Dynamic shared-memory size per thread in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetCacheConfig, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetSharedSize(CUfunction hfunc, unsigned int bytes); + +/** + * \brief Sets the parameter size for the function + * + * \deprecated + * + * Sets through \p numbytes the total size in bytes needed by the function + * parameters of the kernel corresponding to \p hfunc. 
+ * + * \param hfunc - Kernel to set parameter size for + * \param numbytes - Size of parameter list in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetSize(CUfunction hfunc, unsigned int numbytes); + +/** + * \brief Adds an integer parameter to the function's argument list + * + * \deprecated + * + * Sets an integer parameter that will be specified the next time the + * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + * + * \param hfunc - Kernel to add parameter to + * \param offset - Offset to add parameter to argument list + * \param value - Value of parameter + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSeti(CUfunction hfunc, int offset, unsigned int value); + +/** + * \brief Adds a floating-point parameter to the function's argument list + * + * \deprecated + * + * Sets a floating-point parameter that will be specified the next time the + * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. + * + * \param hfunc - Kernel to add parameter to + * \param offset - Offset to add parameter to argument list + * \param value - Value of parameter + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetf(CUfunction hfunc, int offset, float value); + +/** + * \brief Adds arbitrary data to the function's argument list + * + * \deprecated + * + * Copies an arbitrary amount of data (specified in \p numbytes) from \p ptr + * into the parameter space of the kernel corresponding to \p hfunc. \p offset + * is a byte offset. 
+ * + * \param hfunc - Kernel to add data to + * \param offset - Offset to add data to argument list + * \param ptr - Pointer to arbitrary data + * \param numbytes - Size of data to copy in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetv(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a 1 x 1 x 1 grid of blocks. The block + * contains the number of threads specified by a previous call to + * ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. + * + * \param f - Kernel to launch + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunchGrid, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunch(CUfunction f); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + * blocks. Each block contains the number of threads specified by a previous + * call to ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. 
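+ *
+ * For reference, a sketch of the legacy sequence this deprecated path requires
+ * (the kernel handle \p f and the argument value are assumptions; new code
+ * should use ::cuLaunchKernel instead):
+ * \code
+   int value = 42;                      // hypothetical kernel argument
+   cuFuncSetBlockShape(f, 256, 1, 1);   // threads per block
+   cuFuncSetSharedSize(f, 0);           // no dynamic shared memory
+   cuParamSetSize(f, sizeof(value));    // total parameter size in bytes
+   cuParamSeti(f, 0, value);            // integer argument at offset 0
+   cuLaunchGrid(f, 64, 1);              // launch on a 64 x 1 grid of blocks
+ * \endcode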
+ * + * \param f - Kernel to launch + * \param grid_width - Width of grid in blocks + * \param grid_height - Height of grid in blocks + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGridAsync, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGrid(CUfunction f, int grid_width, int grid_height); + +/** + * \brief Launches a CUDA function + * + * \deprecated + * + * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of + * blocks. Each block contains the number of threads specified by a previous + * call to ::cuFuncSetBlockShape(). + * + * The block shape, dynamic shared memory size, and parameter information + * must be set using + * ::cuFuncSetBlockShape(), + * ::cuFuncSetSharedSize(), + * ::cuParamSetSize(), + * ::cuParamSeti(), + * ::cuParamSetf(), and + * ::cuParamSetv() + * prior to calling this function. + * + * Launching a function via ::cuLaunchKernel() invalidates the function's + * block shape, dynamic shared memory size, and parameter information. After + * launching via cuLaunchKernel, this state must be re-initialized prior to + * calling this function. Failure to do so results in undefined behavior. + * + * \param f - Kernel to launch + * \param grid_width - Width of grid in blocks + * \param grid_height - Height of grid in blocks + * \param hStream - Stream identifier + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_LAUNCH_FAILED, + * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, + * ::CUDA_ERROR_LAUNCH_TIMEOUT, + * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, + * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED + * + * \note In certain cases where cubins are created with no ABI (i.e., using \p ptxas \p --abi-compile \p no), + * this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by + * growing the per-thread stack as needed per launch and not shrinking it afterwards. + * + * \note_null_stream + * \notefnerr + * + * \sa ::cuFuncSetBlockShape, + * ::cuFuncSetSharedSize, + * ::cuFuncGetAttribute, + * ::cuParamSetSize, + * ::cuParamSetf, + * ::cuParamSeti, + * ::cuParamSetv, + * ::cuLaunch, + * ::cuLaunchGrid, + * ::cuLaunchKernel + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGridAsync(CUfunction f, int grid_width, int grid_height, CUstream hStream); + + +/** + * \brief Adds a texture-reference to the function's argument list + * + * \deprecated + * + * Makes the CUDA array or linear memory bound to the texture reference + * \p hTexRef available to a device program as a texture. In this version of + * CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and + * the \p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT. 
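+ *
+ * A sketch of this deprecated pattern (the module handle and texture-reference
+ * name are assumptions):
+ * \code
+   CUtexref texRef;
+   cuModuleGetTexRef(&texRef, hModule, "myTexture");      // hypothetical texref name
+   cuParamSetTexRef(hfunc, CU_PARAM_TR_DEFAULT, texRef);
+ * \endcode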
+ * + * \param hfunc - Kernel to add texture-reference to + * \param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT) + * \param hTexRef - Texture-reference to add to argument list + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetTexRef(CUfunction hfunc, int texunit, CUtexref hTexRef); +/** @} */ /* END CUDA_EXEC_DEPRECATED */ + +/** + * \defgroup CUDA_GRAPH Graph Management + * + * ___MANBRIEF___ graph management functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the graph management functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Creates a graph + * + * Creates an empty graph, which is returned via \p phGraph. + * + * \param phGraph - Returns newly created graph + * \param flags - Graph creation flags, must be 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode, + * ::cuGraphInstantiate, + * ::cuGraphDestroy, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphCreate(CUgraph *phGraph, unsigned int flags); + +/** + * \brief Creates a kernel execution node and adds it to a graph + * + * Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The CUDA_KERNEL_NODE_PARAMS structure is defined as: + * + * \code + * typedef struct CUDA_KERNEL_NODE_PARAMS_st { + * CUfunction func; + * unsigned int gridDimX; + * unsigned int gridDimY; + * unsigned int gridDimZ; + * unsigned int blockDimX; + * unsigned int blockDimY; + * unsigned int blockDimZ; + * unsigned int sharedMemBytes; + * void **kernelParams; + * void **extra; + * } CUDA_KERNEL_NODE_PARAMS; + * \endcode + * + * When the graph is launched, the node will invoke kernel \p func on a (\p gridDimX x + * \p gridDimY x \p gridDimZ) grid of blocks. Each block contains + * (\p blockDimX x \p blockDimY x \p blockDimZ) threads. + * + * \p sharedMemBytes sets the amount of dynamic shared memory that will be + * available to each thread block. + * + * Kernel parameters to \p func can be specified in one of two ways: + * + * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N + * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, + * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual + * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need + * to be specified as that information is retrieved directly from the kernel's image. 
+ * + * 2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single + * buffer that is passed in via \p extra. This places the burden on the application of knowing each + * kernel parameter's size and alignment/padding within the buffer. The \p extra parameter exists + * to allow this function to take additional less commonly used arguments. \p extra specifies + * a list of names of extra settings and their corresponding values. Each extra setting name is + * immediately followed by the corresponding value. The list must be terminated with either NULL or + * CU_LAUNCH_PARAM_END. + * + * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra + * array; + * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next + * value in \p extra will be a pointer to a buffer + * containing all the kernel parameters for launching kernel + * \p func; + * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next + * value in \p extra will be a pointer to a size_t + * containing the size of the buffer specified with + * ::CU_LAUNCH_PARAM_BUFFER_POINTER; + * + * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both + * \p kernelParams and \p extra (i.e. both \p kernelParams and \p extra are non-NULL). + * ::CUDA_ERROR_INVALID_VALUE will be returned if \p extra is used for a cooperative kernel. + * + * The \p kernelParams or \p extra array, as well as the argument values it points to, + * are copied during this call. + * + * \note Kernels launched using graphs must not use texture and surface references. Reading or + * writing through any texture or surface reference is undefined behavior. + * This restriction does not apply to texture and surface objects. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the GPU execution node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuLaunchCooperativeKernel, + * ::cuGraphKernelNodeGetParams, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a kernel node's parameters + * + * Returns the parameters of kernel node \p hNode in \p nodeParams. + * The \p kernelParams or \p extra array returned in \p nodeParams, + * as well as the argument values it points to, are owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphKernelNodeSetParams to update the + * parameters of this node. + * + * The params will contain either \p kernelParams or \p extra, + * according to which of these was most recently set on the node. 
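+ *
+ * A small sketch of the read-modify-write pattern (the node handle \p hNode is
+ * assumed to be an existing kernel node):
+ * \code
+   CUDA_KERNEL_NODE_PARAMS p;
+   cuGraphKernelNodeGetParams(hNode, &p);   // read the node's current parameters
+   p.gridDimX *= 2;                         // e.g. double the grid width
+   cuGraphKernelNodeSetParams(hNode, &p);   // write the updated parameters back
+ * \endcode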
+ * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeSetParams + */ +CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a kernel node's parameters + * + * Sets the parameters of kernel node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeGetParams + */ +CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a memcpy node and adds it to a graph + * + * Creates a new memcpy node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the graph is launched, the node will perform the memcpy described by \p copyParams. + * See ::cuMemcpy3D() for a description of the structure and its restrictions. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer + * to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed + * for those operand(s). The managed memory will be treated as residing on either the + * host or the device, depending on which memory type is specified. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param copyParams - Parameters for the memory copy + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphMemcpyNodeGetParams, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemcpyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); + +/** + * \brief Returns a memcpy node's parameters + * + * Returns the parameters of memcpy node \p hNode in \p nodeParams. 
+ * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeSetParams + */ +CUresult CUDAAPI cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D *nodeParams); + +/** + * \brief Sets a memcpy node's parameters + * + * Sets the parameters of memcpy node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemcpy3D, + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeGetParams + */ +CUresult CUDAAPI cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D *nodeParams); + +/** + * \brief Creates a memset node and adds it to a graph + * + * Creates a new memset node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The element size must be 1, 2, or 4 bytes. + * When the graph is launched, the node will perform the memset described by \p memsetParams. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param memsetParams - Parameters for the memory set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CONTEXT + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphMemsetNodeGetParams, + * ::cuGraphMemsetNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode + */ +CUresult CUDAAPI cuGraphAddMemsetNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); + +/** + * \brief Returns a memset node's parameters + * + * Returns the parameters of memset node \p hNode in \p nodeParams. + * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeSetParams + */ +CUresult CUDAAPI cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a memset node's parameters + * + * Sets the parameters of memset node \p hNode to \p nodeParams. 
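+ *
+ * A small sketch (the node handle \p hNode is assumed to be an existing memset
+ * node; the new fill value is arbitrary):
+ * \code
+   CUDA_MEMSET_NODE_PARAMS p;
+   cuGraphMemsetNodeGetParams(hNode, &p);   // start from the node's current parameters
+   p.value = 0xFF;                          // hypothetical new fill value
+   cuGraphMemsetNodeSetParams(hNode, &p);
+ * \endcode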
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuMemsetD2D32, + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeGetParams + */ +CUresult CUDAAPI cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a host execution node and adds it to a graph + * + * Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the graph is launched, the node will invoke the specified CPU function. + * Host nodes are not supported under MPS with pre-Volta GPUs. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the host node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphHostNodeGetParams, + * ::cuGraphHostNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddHostNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a host node's parameters + * + * Returns the parameters of host node \p hNode in \p nodeParams. + * + * \param hNode - Node to get the parameters for + * \param nodeParams - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeSetParams + */ +CUresult CUDAAPI cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Sets a host node's parameters + * + * Sets the parameters of host node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchHostFunc, + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeGetParams + */ +CUresult CUDAAPI cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a child graph node and adds it to a graph + * + * Creates a new node which executes an embedded graph, and adds it to \p hGraph with + * \p numDependencies dependencies specified via \p dependencies. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * If \p hGraph contains allocation or free nodes, this call will return an error. + * + * The node executes an embedded child graph. The child graph is cloned in this call. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param childGraph - The graph to clone into this node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphChildGraphNodeGetGraph, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode, + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphAddChildGraphNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); + +/** + * \brief Gets a handle to the embedded graph of a child graph node + * + * Gets a handle to the embedded graph in a child graph node. This call + * does not clone the graph. Changes to the graph will be reflected in + * the node, and the node retains ownership of the graph. + * + * Allocation and free nodes cannot be added to the returned graph. + * Attempting to do so will return an error. + * + * \param hNode - Node to get the embedded graph for + * \param phGraph - Location to store a handle to the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphNodeFindInClone + */ +CUresult CUDAAPI cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph *phGraph); + +/** + * \brief Creates an empty node and adds it to a graph + * + * Creates a new node which performs no operation, and adds it to \p hGraph with + * \p numDependencies dependencies specified via \p dependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * An empty node performs no operation during execution, but can be used for + * transitive ordering. For example, a phased execution graph with 2 groups of n + * nodes with a barrier between them can be represented using an empty node and + * 2*n dependency edges, rather than no empty node and n^2 dependency edges. 
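+ *
+ * For example, a barrier between two groups of nodes can be expressed with a
+ * single empty node (sketch only; the arrays groupA / groupB and their counts
+ * numA / numB are assumed to exist):
+ *
+ * \code
+    CUgraphNode barrier;
+    /* the barrier depends on every node in group A */
+    cuGraphAddEmptyNode(&barrier, hGraph, groupA, numA);
+    /* every node in group B depends on the barrier */
+    for (size_t i = 0; i < numB; ++i) {
+        cuGraphAddDependencies(hGraph, &barrier, &groupB[i], 1);
+    }
+ * \endcode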
+ * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEmptyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); + +/** + * \brief Creates an event record node and adds it to a graph + * + * Creates a new event record node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * Each launch of the graph will record \p event to capture execution of the + * node's dependencies. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEventRecordNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + +/** + * \brief Returns the event associated with an event record node + * + * Returns the event of event record node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventRecordNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + +/** + * \brief Sets an event record node's event + * + * Sets the event of event record node \p hNode to \p event. 
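+ *
+ * A minimal sketch (the record node \p hNode is assumed to already exist):
+ *
+ * \code
+    CUevent ev;
+    cuEventCreate(&ev, CU_EVENT_DEFAULT);
+    cuGraphEventRecordNodeSetEvent(hNode, ev);  /* future launches record ev */
+ * \endcode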
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuGraphEventRecordNodeGetEvent, + * ::cuGraphEventWaitNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventRecordNodeSetEvent(CUgraphNode hNode, CUevent event); + +/** + * \brief Creates an event wait node and adds it to a graph + * + * Creates a new event wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and event specified in \p event. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * The graph node will wait for all work captured in \p event. See ::cuEventRecord() + * for details on what is captured by an event. \p event may be from a different context + * or device than the launch stream. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param event - Event for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventRecordNode, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddEventWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + +/** + * \brief Returns the event associated with an event wait node + * + * Returns the event of event wait node \p hNode in \p event_out. + * + * \param hNode - Node to get the event for + * \param event_out - Pointer to return the event + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeSetEvent, + * ::cuGraphEventRecordNodeGetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventWaitNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + +/** + * \brief Sets an event wait node's event + * + * Sets the event of event wait node \p hNode to \p event. 
+ * + * \param hNode - Node to set the event for + * \param event - Event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent + */ +CUresult CUDAAPI cuGraphEventWaitNodeSetEvent(CUgraphNode hNode, CUevent event); + +/** + * \brief Creates an external semaphore signal node and adds it to a graph + * + * Creates a new external semaphore signal node and adds it to \p hGraph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified + * in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the + * node will be placed at the root of the graph. \p dependencies may not have any + * duplicate entries. A handle to the new node will be returned in \p phGraphNode. + * + * Performs a signal operation on a set of externally allocated semaphore objects + * when the node is launched. The operation(s) will occur after all of the node's + * dependencies have completed. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphExternalSemaphoresSignalNodeGetParams, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddExternalSemaphoresSignalNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Returns an external semaphore signal node's parameters + * + * Returns the parameters of an external semaphore signal node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. 
+ * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out); + +/** + * \brief Sets an external semaphore signal node's parameters + * + * Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphExternalSemaphoresSignalNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Creates an external semaphore wait node and adds it to a graph + * + * Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * Performs a wait operation on a set of externally allocated semaphore objects + * when the node is launched. The node's dependencies will not be launched until + * the wait operation has completed. 
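+ *
+ * An illustrative sketch, assuming a semaphore extSem previously imported with
+ * ::cuImportExternalSemaphore, that waits on a single fence value:
+ *
+ * \code
+    CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS waitParams;
+    memset(&waitParams, 0, sizeof(waitParams));
+    waitParams.params.fence.value = 1;        /* fence value to wait for, assumed */
+
+    CUDA_EXT_SEM_WAIT_NODE_PARAMS np;
+    memset(&np, 0, sizeof(np));
+    np.extSemArray = &extSem;
+    np.paramsArray = &waitParams;
+    np.numExtSems  = 1;
+
+    CUgraphNode waitNode;
+    cuGraphAddExternalSemaphoresWaitNode(&waitNode, hGraph, NULL, 0, &np);
+ * \endcode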
+ * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphExternalSemaphoresWaitNodeGetParams, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddExternalSemaphoresWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Returns an external semaphore wait node's parameters + * + * Returns the parameters of an external semaphore wait node \p hNode in \p params_out. + * The \p extSemArray and \p paramsArray returned in \p params_out, + * are owned by the node. This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the + * parameters of this node. + * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuLaunchKernel, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out); + +/** + * \brief Sets an external semaphore wait node's parameters + * + * Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams. + * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphExternalSemaphoresWaitNodeSetParams, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync + */ +CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Creates a batch memory operation node and adds it to a graph + * + * Creates a new batch memory operation node and adds it to \p hGraph with \p + * numDependencies dependencies specified via \p dependencies and arguments specified in \p nodeParams. 
+ * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p phGraphNode. + * + * When the node is added, the paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. + * + * \note + * Warning: + * Improper use of this API may deadlock the application. Synchronization + * ordering established through this API is not visible to CUDA. CUDA tasks + * that are (even indirectly) ordered by this API should also have that order + * expressed with CUDA-visible dependencies such as events. This ensures that + * the scheduler does not serialize them in an improper order. For more + * information, see the Stream Memory Operations section in the programming + * guide(https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html). + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuStreamWaitValue32, + * ::cuStreamWriteValue32, + * ::cuStreamWaitValue64, + * ::cuStreamWriteValue64, + * ::cuGraphBatchMemOpNodeGetParams, + * ::cuGraphBatchMemOpNodeSetParams, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddBatchMemOpNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a batch mem op node's parameters + * + * Returns the parameters of batch mem op node \p hNode in \p nodeParams_out. + * The \p paramArray returned in \p nodeParams_out is owned by the node. + * This memory remains valid until the node is destroyed or its + * parameters are modified, and should not be modified + * directly. Use ::cuGraphBatchMemOpNodeSetParams to update the + * parameters of this node. + * + * \param hNode - Node to get the parameters for + * \param nodeParams_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeSetParams + */ +CUresult CUDAAPI cuGraphBatchMemOpNodeGetParams(CUgraphNode hNode, CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams_out); + +/** + * \brief Sets a batch mem op node's parameters + * + * Sets the parameters of batch mem op node \p hNode to \p nodeParams. + * + * The paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. 
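+ *
+ * A minimal sketch of building replacement parameters (a device address dFlag
+ * and the node's context ctx are assumed to exist):
+ *
+ * \code
+    CUstreamBatchMemOpParams op;
+    memset(&op, 0, sizeof(op));
+    op.waitValue.operation = CU_STREAM_MEM_OP_WAIT_VALUE_32;
+    op.waitValue.address   = dFlag;
+    op.waitValue.value     = 1;
+    op.waitValue.flags     = CU_STREAM_WAIT_VALUE_GEQ;
+
+    CUDA_BATCH_MEM_OP_NODE_PARAMS np;
+    memset(&np, 0, sizeof(np));
+    np.ctx        = ctx;
+    np.count      = 1;
+    np.paramArray = &op;    /* copied by the call, may be freed afterwards */
+    cuGraphBatchMemOpNodeSetParams(hNode, &np);
+ * \endcode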
+ * + * \param hNode - Node to set the parameters for + * \param nodeParams - Parameters to copy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeGetParams + */ +CUresult CUDAAPI cuGraphBatchMemOpNodeSetParams(CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for a batch mem op node in the given graphExec + * + * Sets the parameters of a batch mem op node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * The following fields on operations may be modified on an executable graph: + * + * op.waitValue.address + * op.waitValue.value[64] + * op.waitValue.flags bits corresponding to wait type (i.e. CU_STREAM_WAIT_VALUE_FLUSH bit cannot be modified) + * op.writeValue.address + * op.writeValue.value[64] + * + * Other fields, such as the context, count or type of operations, and other types of operations such as membars, + * may not be modified. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * The paramArray inside \p nodeParams is copied and therefore it can be + * freed after the call returns. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Batch mem op node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuStreamBatchMemOp, + * ::cuGraphAddBatchMemOpNode, + * ::cuGraphBatchMemOpNodeGetParams, + * ::cuGraphBatchMemOpNodeSetParams, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecBatchMemOpNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_BATCH_MEM_OP_NODE_PARAMS *nodeParams); + +/** + * \brief Creates an allocation node and adds it to a graph + * + * Creates a new allocation node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param nodeParams - Parameters for the node + * + * When ::cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in + * \p nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. + * + * If the allocation is freed in the same graph, by creating a free node using ::cuGraphAddMemFreeNode, + * the allocation can be accessed by nodes ordered after the allocation node but before the free node. 
+ * These allocations cannot be freed outside the owning graph, and they can only be freed once in the + * owning graph. + * + * If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the + * graph which are ordered after the allocation node, but also by stream operations ordered after the + * graph's execution but before the allocation is freed. + * + * Allocations which are not freed in the same graph can be freed by: + * - passing the allocation to ::cuMemFreeAsync or ::cuMemFree; + * - launching a graph with a free node for that allocation; or + * - specifying ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH during instantiation, which makes + * each launch behave as though it called ::cuMemFreeAsync for every unfreed allocation. + * + * It is not possible to free an allocation in both the owning graph and another graph. If the allocation + * is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed + * in another graph, a free node can no longer be added to the owning graph. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemFreeNode, + * ::cuGraphMemAllocNodeGetParams, + * ::cuDeviceGraphMemTrim, + * ::cuDeviceGetGraphMemAttribute, + * ::cuDeviceSetGraphMemAttribute, + * ::cuMemAllocAsync, + * ::cuMemFreeAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemAllocNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams); + +/** + * \brief Returns a memory alloc node's parameters + * + * Returns the parameters of a memory alloc node \p hNode in \p params_out. + * The \p poolProps and \p accessDescs returned in \p params_out, are owned by the + * node. This memory remains valid until the node is destroyed. The returned + * parameters must not be modified. + * + * \param hNode - Node to get the parameters for + * \param params_out - Pointer to return the parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphMemFreeNodeGetParams + */ +CUresult CUDAAPI cuGraphMemAllocNodeGetParams(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out); + +/** + * \brief Creates a memory free node and adds it to a graph + * + * Creates a new memory free node and adds it to \p hGraph with \p numDependencies + * dependencies specified via \p dependencies and arguments specified in \p nodeParams. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. 
\p dependencies may not have any duplicate entries. A handle + * to the new node will be returned in \p phGraphNode. + * + * \param phGraphNode - Returns newly created node + * \param hGraph - Graph to which to add the node + * \param dependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dptr - Address of memory to free + * + * ::cuGraphAddMemFreeNode will return ::CUDA_ERROR_INVALID_VALUE if the user attempts to free: + * - an allocation twice in the same graph. + * - an address that was not returned by an allocation node. + * - an invalid address. + * + * The following restrictions apply to graphs which contain allocation and/or memory free nodes: + * - Nodes and edges of the graph cannot be deleted. + * - The graph cannot be used in a child node. + * - Only one instantiation of the graph may exist at any point in time. + * - The graph cannot be cloned. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_NOT_SUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphMemFreeNodeGetParams, + * ::cuDeviceGraphMemTrim, + * ::cuDeviceGetGraphMemAttribute, + * ::cuDeviceSetGraphMemAttribute, + * ::cuMemAllocAsync, + * ::cuMemFreeAsync, + * ::cuGraphCreate, + * ::cuGraphDestroyNode, + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddEventRecordNode, + * ::cuGraphAddEventWaitNode, + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphAddMemFreeNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr); + +/** + * \brief Returns a memory free node's parameters + * + * Returns the address of a memory free node \p hNode in \p dptr_out. + * + * \param hNode - Node to get the parameters for + * \param dptr_out - Pointer to return the device address + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemFreeNode, + * ::cuGraphMemAllocNodeGetParams + */ +CUresult CUDAAPI cuGraphMemFreeNodeGetParams(CUgraphNode hNode, CUdeviceptr *dptr_out); + +/** + * \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. + * + * Blocks which are not in use by a graph that is either currently executing or scheduled to execute are + * freed back to the operating system. + * + * \param device - The device for which cached memory should be freed. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode, + * ::cuDeviceSetGraphMemAttribute, + * ::cuDeviceGetGraphMemAttribute + */ +CUresult CUDAAPI cuDeviceGraphMemTrim(CUdevice device); + +/** + * \brief Query asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: Amount of memory, in bytes, currently associated with graphs + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. 
+ * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: Amount of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - retrieved value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuDeviceSetGraphMemAttribute, + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode + */ +CUresult CUDAAPI cuDeviceGetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); + +/** + * \brief Set asynchronous allocation attributes related to graphs + * + * Valid attributes are: + * + * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the + * last time it was reset. High watermark can only be reset to zero. + * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by + * the CUDA graphs asynchronous allocator. + * + * \param device - Specifies the scope of the query + * \param attr - attribute to get + * \param value - pointer to value to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_DEVICE + * + * \sa + * ::cuDeviceGetGraphMemAttribute, + * ::cuGraphAddMemAllocNode, + * ::cuGraphAddMemFreeNode + */ +CUresult CUDAAPI cuDeviceSetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); + +/** + * \brief Clones a graph + * + * This function creates a copy of \p originalGraph and returns it in \p phGraphClone. + * All parameters are copied into the cloned graph. The original graph may be modified + * after this call without affecting the clone. + * + * Child graph nodes in the original graph are recursively copied into the clone. + * + * \param phGraphClone - Returns newly created cloned graph + * \param originalGraph - Graph to clone + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OUT_OF_MEMORY + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphNodeFindInClone + */ +CUresult CUDAAPI cuGraphClone(CUgraph *phGraphClone, CUgraph originalGraph); + +/** + * \brief Finds a cloned version of a node + * + * This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode + * in the original graph. + * + * \p hClonedGraph must have been cloned from \p hOriginalGraph via ::cuGraphClone. + * \p hOriginalNode must have been in \p hOriginalGraph at the time of the call to + * ::cuGraphClone, and the corresponding cloned node in \p hClonedGraph must not have + * been removed. The cloned node is then returned via \p phClonedNode. + * + * \param phNode - Returns handle to the cloned node + * \param hOriginalNode - Handle to the original node + * \param hClonedGraph - Cloned graph to query + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphClone + */ +CUresult CUDAAPI cuGraphNodeFindInClone(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); + +/** + * \brief Returns a node's type + * + * Returns the node type of \p hNode in \p type. 
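+ *
+ * A short sketch of dispatching on the returned type (\p hNode assumed valid):
+ *
+ * \code
+    CUgraphNodeType t;
+    cuGraphNodeGetType(hNode, &t);
+    if (t == CU_GRAPH_NODE_TYPE_KERNEL) {
+        CUDA_KERNEL_NODE_PARAMS kp;
+        cuGraphKernelNodeGetParams(hNode, &kp);
+        /* inspect kp, then optionally cuGraphKernelNodeSetParams(hNode, &kp) */
+    }
+ * \endcode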
+ * + * \param hNode - Node to query + * \param type - Pointer to return the node type + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphChildGraphNodeGetGraph, + * ::cuGraphKernelNodeGetParams, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphHostNodeGetParams, + * ::cuGraphHostNodeSetParams, + * ::cuGraphMemcpyNodeGetParams, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphMemsetNodeGetParams, + * ::cuGraphMemsetNodeSetParams + */ +CUresult CUDAAPI cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType *type); + +/** + * \brief Returns a graph's nodes + * + * Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this + * function will return the number of nodes in \p numNodes. Otherwise, + * \p numNodes entries will be filled in. If \p numNodes is higher than the actual + * number of nodes, the remaining entries in \p nodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numNodes. + * + * \param hGraph - Graph to query + * \param nodes - Pointer to return the nodes + * \param numNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetType, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetNodes(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); + +/** + * \brief Returns a graph's root nodes + * + * Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this + * function will return the number of root nodes in \p numRootNodes. Otherwise, + * \p numRootNodes entries will be filled in. If \p numRootNodes is higher than the actual + * number of root nodes, the remaining entries in \p rootNodes will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numRootNodes. + * + * \param hGraph - Graph to query + * \param rootNodes - Pointer to return the root nodes + * \param numRootNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphGetNodes, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetType, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); + +/** + * \brief Returns a graph's dependency edges + * + * Returns a list of \p hGraph's dependency edges. Edges are returned via corresponding + * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the + * node in \p from[i]. \p from and \p to may both be NULL, in which + * case this function only returns the number of edges in \p numEdges. Otherwise, + * \p numEdges entries will be filled in. If \p numEdges is higher than the actual + * number of edges, the remaining entries in \p from and \p to will be set to NULL, and + * the number of edges actually returned will be written to \p numEdges. 
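+ *
+ * The usual two-call pattern, as a sketch (error checking omitted; stdlib.h is
+ * assumed to be included):
+ *
+ * \code
+    size_t numEdges = 0;
+    cuGraphGetEdges(hGraph, NULL, NULL, &numEdges);  /* first call: count only */
+    CUgraphNode *from = (CUgraphNode *)malloc(numEdges * sizeof(CUgraphNode));
+    CUgraphNode *to   = (CUgraphNode *)malloc(numEdges * sizeof(CUgraphNode));
+    cuGraphGetEdges(hGraph, from, to, &numEdges);    /* second call: fill arrays */
+ * \endcode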
+ * + * \param hGraph - Graph to get the edges from + * \param from - Location to return edge endpoints + * \param to - Location to return edge endpoints + * \param numEdges - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphGetEdges(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); + +/** + * \brief Returns a node's dependencies + * + * Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this + * function will return the number of dependencies in \p numDependencies. Otherwise, + * \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual + * number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the + * number of nodes actually obtained will be returned in \p numDependencies. + * + * \param hNode - Node to query + * \param dependencies - Pointer to return the dependencies + * \param numDependencies - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetDependentNodes, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies + */ +CUresult CUDAAPI cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); + +/** + * \brief Returns a node's dependent nodes + * + * Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which + * case this function will return the number of dependent nodes in \p numDependentNodes. + * Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is + * higher than the actual number of dependent nodes, the remaining entries in + * \p dependentNodes will be set to NULL, and the number of nodes actually obtained will + * be returned in \p numDependentNodes. + * + * \param hNode - Node to query + * \param dependentNodes - Pointer to return the dependent nodes + * \param numDependentNodes - See description + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetDependencies, + * ::cuGraphGetNodes, + * ::cuGraphGetRootNodes, + * ::cuGraphGetEdges, + * ::cuGraphAddDependencies, + * ::cuGraphRemoveDependencies + */ +CUresult CUDAAPI cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); + +/** + * \brief Adds dependency edges to a graph + * + * The number of dependencies to be added is defined by \p numDependencies + * Elements in \p from and \p to at corresponding indices define a dependency. + * Each node in \p from and \p to must belong to \p hGraph. + * + * If \p numDependencies is 0, elements in \p from and \p to will be ignored. + * Specifying an existing dependency will return an error. 
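+ *
+ * For example, making nodeB depend on nodeA (sketch; both nodes are assumed to
+ * already belong to \p hGraph):
+ *
+ * \code
+    CUgraphNode from[1] = { nodeA };
+    CUgraphNode to[1]   = { nodeB };
+    cuGraphAddDependencies(hGraph, from, to, 1);
+ * \endcode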
+ * + * \param hGraph - Graph to which dependencies are added + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be added + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphRemoveDependencies, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + +/** + * \brief Removes dependency edges from a graph + * + * The number of \p dependencies to be removed is defined by \p numDependencies. + * Elements in \p from and \p to at corresponding indices define a dependency. + * Each node in \p from and \p to must belong to \p hGraph. + * + * If \p numDependencies is 0, elements in \p from and \p to will be ignored. + * Specifying a non-existing dependency will return an error. + * + * Dependencies cannot be removed from graphs which contain allocation or free nodes. + * Any attempt to do so will return an error. + * + * \param hGraph - Graph from which to remove dependencies + * \param from - Array of nodes that provide the dependencies + * \param to - Array of dependent nodes + * \param numDependencies - Number of dependencies to be removed + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddDependencies, + * ::cuGraphGetEdges, + * ::cuGraphNodeGetDependencies, + * ::cuGraphNodeGetDependentNodes + */ +CUresult CUDAAPI cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + +/** + * \brief Remove a node from the graph + * + * Removes \p hNode from its graph. This operation also severs any dependencies of other nodes + * on \p hNode and vice versa. + * + * Nodes which belong to a graph which contains allocation or free nodes cannot be destroyed. + * Any attempt to do so will return an error. + * + * \param hNode - Node to remove + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddChildGraphNode, + * ::cuGraphAddEmptyNode, + * ::cuGraphAddKernelNode, + * ::cuGraphAddHostNode, + * ::cuGraphAddMemcpyNode, + * ::cuGraphAddMemsetNode + */ +CUresult CUDAAPI cuGraphDestroyNode(CUgraphNode hNode); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p hGraph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p phGraphExec. + * + * The \p flags parameter controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. 
This flag cannot be used in + * conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p hGraph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate + * a second executable graph before destroying the first with ::cuGraphExecDestroy + * will result in an error. + * + * If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + * contexts, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single context. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current context, and the current context must + * match the context of other nodes in the graph. + * + * \param phGraphExec - Returns instantiated graph + * \param hGraph - Graph to instantiate + * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphCreate, + * ::cuGraphUpload, + * ::cuGraphLaunch, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p hGraph as an executable graph according to the \p instantiateParams structure. + * The graph is validated for any structural constraints or intra-node constraints + * which were not previously validated. If instantiation is successful, a handle to + * the instantiated graph is returned in \p phGraphExec. + * + * \p instantiateParams controls the behavior of instantiation and subsequent + * graph launches, as well as returning more detailed information in the event of an error. + * ::CUDA_GRAPH_INSTANTIATE_PARAMS is defined as: + * + * \code + typedef struct { + cuuint64_t flags; + CUstream hUploadStream; + CUgraphNode hErrNode_out; + CUgraphInstantiateResult result_out; + } CUDA_GRAPH_INSTANTIATE_PARAMS; + * \endcode + * + * The \p flags field controls the behavior of instantiation and subsequent + * graph launches. Valid flags are: + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a + * graph containing memory allocation nodes to automatically free any + * unfreed memory allocations before the graph is relaunched. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD, which will perform an upload of the graph + * into \p hUploadStream once the graph has been instantiated. 
+ * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH, which configures the graph for launch + * from the device. If this flag is passed, the executable graph handle returned can be + * used to launch the graph from both the host and device. This flag can only be used + * on platforms which support unified addressing. This flag cannot be used in + * conjunction with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH. + * + * - ::CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, which causes the graph + * to use the priorities from the per-node attributes rather than the priority + * of the launch stream during execution. Note that priorities are only available + * on kernel nodes, and are copied from stream priority during stream capture. + * + * If \p hGraph contains any allocation or free nodes, there can be at most one + * executable graph in existence for that graph at a time. An attempt to instantiate a + * second executable graph before destroying the first with ::cuGraphExecDestroy will + * result in an error. + * + * If \p hGraph contains kernels which call device-side cudaGraphLaunch() from multiple + * contexts, this will result in an error. + * + * Graphs instantiated for launch on the device have additional restrictions which do not + * apply to host graphs: + * + * - The graph's nodes must reside on a single context. + * - The graph can only contain kernel nodes, memcpy nodes, memset nodes, and child graph nodes. + * Operation-specific restrictions are outlined below. + * - Kernel nodes: + * - Use of CUDA Dynamic Parallelism is not permitted. + * - Cooperative launches are permitted as long as MPS is not in use. + * - Memcpy nodes: + * - Only copies involving device memory and/or pinned device-mapped host memory are permitted. + * - Copies involving CUDA arrays are not permitted. + * - Both operands must be accessible from the current context, and the current context must + * match the context of other nodes in the graph. + * + * In the event of an error, the \p result_out and \p hErrNode_out fields will contain more + * information about the nature of the error. Possible error reporting includes: + * + * - ::CUDA_GRAPH_INSTANTIATE_ERROR, if passed an invalid value or if an unexpected error occurred + * which is described by the return value of the function. \p hErrNode_out will be set to NULL. + * - ::CUDA_GRAPH_INSTANTIATE_INVALID_STRUCTURE, if the graph structure is invalid. \p hErrNode_out + * will be set to one of the offending nodes. + * - ::CUDA_GRAPH_INSTANTIATE_NODE_OPERATION_NOT_SUPPORTED, if the graph is instantiated for device + * launch but contains a node of an unsupported node type, or a node which performs unsupported + * operations, such as use of CUDA dynamic parallelism within a kernel node. \p hErrNode_out will + * be set to this node. + * - ::CUDA_GRAPH_INSTANTIATE_MULTIPLE_CTXS_NOT_SUPPORTED, if the graph is instantiated for device + * launch but a node’s context differs from that of another node. This error can also be returned + * if a graph is not instantiated for device launch and it contains kernels which call device-side + * cudaGraphLaunch() from multiple contexts. \p hErrNode_out will be set to this node. + * + * If instantiation is successful, \p result_out will be set to ::CUDA_GRAPH_INSTANTIATE_SUCCESS, + * and \p hErrNode_out will be set to NULL. 
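+ *
+ * A minimal sketch (an existing stream hStream is assumed; error handling is
+ * only indicated):
+ *
+ * \code
+    CUDA_GRAPH_INSTANTIATE_PARAMS ip;
+    memset(&ip, 0, sizeof(ip));
+    ip.flags         = CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD;
+    ip.hUploadStream = hStream;
+
+    CUgraphExec hGraphExec;
+    if (cuGraphInstantiateWithParams(&hGraphExec, hGraph, &ip) != CUDA_SUCCESS) {
+        /* ip.result_out and ip.hErrNode_out describe what went wrong */
+    }
+ * \endcode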
+ * + * \param phGraphExec - Returns instantiated graph + * \param hGraph - Graph to instantiate + * \param instantiateParams - Instantiation parameters + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphCreate, + * ::cuGraphInstantiate, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphInstantiateWithParams(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); + +/** + * \brief Query the instantiation flags of an executable graph + * + * Returns the flags that were passed to instantiation for the given executable graph. + * ::CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD will not be returned by this API as it does + * not affect the resulting executable graph. + * + * \param hGraphExec - The executable graph to query + * \param flags - Returns the instantiation flags + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphInstantiateWithParams + */ +CUresult CUDAAPI cuGraphExecGetFlags(CUgraphExec hGraphExec, cuuint64_t *flags); + +/** + * \brief Sets the parameters for a kernel node in the given graphExec + * + * Sets the parameters of a kernel node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. All \p nodeParams + * fields may change, but the following restrictions apply to \p func updates: + * + * - The owning context of the function cannot change. + * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated + * to a function which uses CDP + * - If \p hGraphExec was not instantiated for device launch, a node whose function originally + * did not use device-side cudaGraphLaunch() cannot be updated to a function which uses + * device-side cudaGraphLaunch() unless the node resides on the same context as nodes which + * contained such calls at instantiate-time. If no such calls were present at instantiation, + * these updates cannot be performed at all. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - kernel node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddKernelNode, + * ::cuGraphKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec. 
+ * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p copyParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The source and destination memory in \p copyParams must be allocated from the same + * contexts as the original source and destination memory. Both the instantiation-time + * memory operands and the memory operands in \p copyParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * Returns CUDA_ERROR_INVALID_VALUE if the memory operands' mappings changed or + * either the original or new memory operands are multidimensional. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Memcpy node from the graph which was used to instantiate graphExec + * \param copyParams - The updated parameters to set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemcpyNode, + * ::cuGraphMemcpyNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecMemcpyNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); + +/** + * \brief Sets the parameters for a memset node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p memsetParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The destination memory in \p memsetParams must be allocated from the same + * contexts as the original destination memory. Both the instantiation-time + * memory operand and the memory operand in \p memsetParams must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * Returns CUDA_ERROR_INVALID_VALUE if the memory operand's mappings changed or + * either the original or new memory operand are multidimensional. 
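+ *
+ * For illustration, a sketch that rewrites only the fill value of a 1D memset node
+ * (\p dptr, \p numElems and \p ctx stand for application-owned state; the field
+ * names follow the ::CUDA_MEMSET_NODE_PARAMS layout):
+ *
+ * \code
+ * CUDA_MEMSET_NODE_PARAMS memsetParams;
+ * memset(&memsetParams, 0, sizeof(memsetParams));
+ * memsetParams.dst         = dptr;               // same allocation context as at instantiation
+ * memsetParams.elementSize = sizeof(unsigned int);
+ * memsetParams.width       = numElems;           // 1D operation: height of 1
+ * memsetParams.height      = 1;
+ * memsetParams.value       = 0xFF;
+ * cuGraphExecMemsetNodeSetParams(hGraphExec, hNode, &memsetParams, ctx);
+ * \endcode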
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Memset node from the graph which was used to instantiate graphExec + * \param memsetParams - The updated parameters to set + * \param ctx - Context on which to run the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddMemsetNode, + * ::cuGraphMemsetNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecMemsetNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); + +/** + * \brief Sets the parameters for a host node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had + * contained \p nodeParams at instantiation. hNode must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. hNode is also + * not modified by this call. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Host node from the graph which was used to instantiate graphExec + * \param nodeParams - The updated parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddHostNode, + * ::cuGraphHostNodeSetParams, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + +/** + * \brief Updates node parameters in the child graph node in the given graphExec. + * + * Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained + * in \p hNode's graph had the parameters contained in \p childGraph's nodes at instantiation. + * \p hNode must remain in the graph which was used to instantiate \p hGraphExec. + * Changed edges to and from \p hNode are ignored. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p hNode is also + * not modified by this call. + * + * The topology of \p childGraph, as well as the node insertion order, must match that + * of the graph contained in \p hNode. See ::cuGraphExecUpdate() for a list of restrictions + * on what can be updated in an instantiated graph. The update is recursive, so child graph + * nodes contained within the top level child graph will also be updated. 
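+ *
+ * In outline (a sketch; \p updatedChild stands for an application-built graph with the
+ * same topology and node insertion order as the original child graph):
+ *
+ * \code
+ * // Recursively swaps in the new node parameters for the embedded child graph
+ * cuGraphExecChildGraphNodeSetParams(hGraphExec, hChildNode, updatedChild);
+ * \endcode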
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param hNode - Child graph node from the graph which was used to instantiate graphExec
+ * \param childGraph - The graph supplying the updated parameters
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphAddChildGraphNode,
+ * ::cuGraphChildGraphNodeGetGraph,
+ * ::cuGraphExecKernelNodeSetParams,
+ * ::cuGraphExecMemcpyNodeSetParams,
+ * ::cuGraphExecMemsetNodeSetParams,
+ * ::cuGraphExecHostNodeSetParams,
+ * ::cuGraphExecEventRecordNodeSetEvent,
+ * ::cuGraphExecEventWaitNodeSetEvent,
+ * ::cuGraphExecExternalSemaphoresSignalNodeSetParams,
+ * ::cuGraphExecExternalSemaphoresWaitNodeSetParams,
+ * ::cuGraphExecUpdate,
+ * ::cuGraphInstantiate
+ */
+CUresult CUDAAPI cuGraphExecChildGraphNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph);
+
+/**
+ * \brief Sets the event for an event record node in the given graphExec
+ *
+ * Sets the event of an event record node in an executable graph \p hGraphExec.
+ * The node is identified by the corresponding node \p hNode in the
+ * non-executable graph, from which the executable graph was instantiated.
+ *
+ * The modifications only affect future launches of \p hGraphExec. Already
+ * enqueued or running launches of \p hGraphExec are not affected by this call.
+ * \p hNode is also not modified by this call.
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param hNode - event record node from the graph from which graphExec was instantiated
+ * \param event - Updated event to use
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphAddEventRecordNode,
+ * ::cuGraphEventRecordNodeGetEvent,
+ * ::cuGraphEventWaitNodeSetEvent,
+ * ::cuEventRecordWithFlags,
+ * ::cuStreamWaitEvent,
+ * ::cuGraphExecKernelNodeSetParams,
+ * ::cuGraphExecMemcpyNodeSetParams,
+ * ::cuGraphExecMemsetNodeSetParams,
+ * ::cuGraphExecHostNodeSetParams,
+ * ::cuGraphExecChildGraphNodeSetParams,
+ * ::cuGraphExecEventWaitNodeSetEvent,
+ * ::cuGraphExecExternalSemaphoresSignalNodeSetParams,
+ * ::cuGraphExecExternalSemaphoresWaitNodeSetParams,
+ * ::cuGraphExecUpdate,
+ * ::cuGraphInstantiate
+ */
+CUresult CUDAAPI cuGraphExecEventRecordNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event);
+
+/**
+ * \brief Sets the event for an event wait node in the given graphExec
+ *
+ * Sets the event of an event wait node in an executable graph \p hGraphExec.
+ * The node is identified by the corresponding node \p hNode in the
+ * non-executable graph, from which the executable graph was instantiated.
+ *
+ * The modifications only affect future launches of \p hGraphExec. Already
+ * enqueued or running launches of \p hGraphExec are not affected by this call.
+ * \p hNode is also not modified by this call.
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - event wait node from the graph from which graphExec was instantiated + * \param event - Updated event to use + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddEventWaitNode, + * ::cuGraphEventWaitNodeGetEvent, + * ::cuGraphEventRecordNodeSetEvent, + * ::cuEventRecordWithFlags, + * ::cuStreamWaitEvent, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecEventWaitNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); + +/** + * \brief Sets the parameters for an external semaphore signal node in the given graphExec + * + * Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore signal node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresSignalNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecExternalSemaphoresSignalNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); + +/** + * \brief Sets the parameters for an external semaphore wait node in the given graphExec + * + * Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. + * The node is identified by the corresponding node \p hNode in the + * non-executable graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * Changing \p nodeParams->numExtSems is not supported. 
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - semaphore wait node from the graph from which graphExec was instantiated + * \param nodeParams - Updated Parameters to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphAddExternalSemaphoresWaitNode, + * ::cuImportExternalSemaphore, + * ::cuSignalExternalSemaphoresAsync, + * ::cuWaitExternalSemaphoresAsync, + * ::cuGraphExecKernelNodeSetParams, + * ::cuGraphExecMemcpyNodeSetParams, + * ::cuGraphExecMemsetNodeSetParams, + * ::cuGraphExecHostNodeSetParams, + * ::cuGraphExecChildGraphNodeSetParams, + * ::cuGraphExecEventRecordNodeSetEvent, + * ::cuGraphExecEventWaitNodeSetEvent, + * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + */ +CUresult CUDAAPI cuGraphExecExternalSemaphoresWaitNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); + +/** + * \brief Enables or disables the specified node in the given graphExec + * + * Sets \p hNode to be either enabled or disabled. Disabled nodes are functionally equivalent + * to empty nodes until they are reenabled. Existing node parameters are not affected by + * disabling/enabling the node. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * The modifications only affect future launches of \p hGraphExec. Already + * enqueued or running launches of \p hGraphExec are not affected by this call. + * \p hNode is also not modified by this call. + * + * \note Currently only kernel, memset and memcpy nodes are supported. + * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Node is enabled if != 0, otherwise the node is disabled + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeGetEnabled, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphNodeSetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int isEnabled); + +/** + * \brief Query whether a node in the given graphExec is enabled + * + * Sets isEnabled to 1 if \p hNode is enabled, or 0 if \p hNode is disabled. + * + * The node is identified by the corresponding node \p hNode in the non-executable + * graph, from which the executable graph was instantiated. + * + * \p hNode must not have been removed from the original graph. + * + * \note Currently only kernel, memset and memcpy nodes are supported. 
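+ *
+ * For illustration, a small sketch combining this query with ::cuGraphNodeSetEnabled
+ * (assuming \p hGraphExec was instantiated from the graph containing \p hNode):
+ *
+ * \code
+ * unsigned int enabled = 0;
+ * cuGraphNodeSetEnabled(hGraphExec, hNode, 0);         // disable: node behaves like an empty node
+ * cuGraphNodeGetEnabled(hGraphExec, hNode, &enabled);  // enabled is now 0
+ * cuGraphNodeSetEnabled(hGraphExec, hNode, 1);         // re-enable with its original parameters
+ * \endcode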
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param hNode - Node from the graph from which graphExec was instantiated + * \param isEnabled - Location to return the enabled status of the node + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphNodeSetEnabled, + * ::cuGraphExecUpdate, + * ::cuGraphInstantiate + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphNodeGetEnabled(CUgraphExec hGraphExec, CUgraphNode hNode, unsigned int *isEnabled); + +/** + * \brief Uploads an executable graph in a stream + * + * Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of + * the same \p hGraphExec will be serialized. Each upload is ordered behind both any + * previous work in \p hStream and any previous launches of \p hGraphExec. + * Uses memory cached by \p stream to back the allocations owned by \p hGraphExec. + * + * \param hGraphExec - Executable graph to upload + * \param hStream - Stream in which to upload the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphLaunch, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraphExec, CUstream hStream); + +/** + * \brief Launches an executable graph in a stream + * + * Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing + * at a time. Each launch is ordered behind both any previous work in \p hStream + * and any previous launches of \p hGraphExec. To execute a graph concurrently, it must be + * instantiated multiple times into multiple executable graphs. + * + * If any allocations created by \p hGraphExec remain unfreed (from a previous launch) and + * \p hGraphExec was not instantiated with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, + * the launch will fail with ::CUDA_ERROR_INVALID_VALUE. + * + * \param hGraphExec - Executable graph to launch + * \param hStream - Stream in which to launch the graph + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphUpload, + * ::cuGraphExecDestroy + */ +CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream); + +/** + * \brief Destroys an executable graph + * + * Destroys the executable graph specified by \p hGraphExec, as well + * as all of its executable nodes. If the executable graph is + * in-flight, it will not be terminated, but rather freed + * asynchronously on completion. + * + * \param hGraphExec - Executable graph to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_VALUE + * \note_graph_thread_safety + * \notefnerr + * + * \sa + * ::cuGraphInstantiate, + * ::cuGraphUpload, + * ::cuGraphLaunch + */ +CUresult CUDAAPI cuGraphExecDestroy(CUgraphExec hGraphExec); + +/** + * \brief Destroys a graph + * + * Destroys the graph specified by \p hGraph, as well as all of its nodes. 
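+ *
+ * A typical teardown sequence, sketched under the assumption that all launches of the
+ * corresponding executable graph were issued into \p hStream:
+ *
+ * \code
+ * cuStreamSynchronize(hStream);    // wait for any in-flight launches to finish
+ * cuGraphExecDestroy(hGraphExec);  // destroy the executable graph first
+ * cuGraphDestroy(hGraph);          // then destroy the graph it was instantiated from
+ * \endcode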
+ *
+ * \param hGraph - Graph to destroy
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphCreate
+ */
+CUresult CUDAAPI cuGraphDestroy(CUgraph hGraph);
+
+/**
+ * \brief Check whether an executable graph can be updated with a graph and perform the update if possible
+ *
+ * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the
+ * node parameters in a topologically identical graph specified by \p hGraph.
+ *
+ * Limitations:
+ *
+ * - Kernel nodes:
+ *   - The owning context of the function cannot change.
+ *   - A node whose function originally did not use CUDA dynamic parallelism cannot be updated
+ *     to a function which uses CDP.
+ *   - A cooperative node cannot be updated to a non-cooperative node, and vice-versa.
+ *   - If the graph was instantiated with CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY, the
+ *     priority attribute cannot change. Equality is checked on the originally requested
+ *     priority values, before they are clamped to the device's supported range.
+ *   - If \p hGraphExec was not instantiated for device launch, a node whose function originally
+ *     did not use device-side cudaGraphLaunch() cannot be updated to a function which uses
+ *     device-side cudaGraphLaunch() unless the node resides on the same context as nodes which
+ *     contained such calls at instantiate-time. If no such calls were present at instantiation,
+ *     these updates cannot be performed at all.
+ * - Memset and memcpy nodes:
+ *   - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change.
+ *   - The source/destination memory must be allocated from the same contexts as the original
+ *     source/destination memory.
+ *   - Only 1D memsets can be changed.
+ * - Additional memcpy node restrictions:
+ *   - Changing either the source or destination memory type (i.e. CU_MEMORYTYPE_DEVICE,
+ *     CU_MEMORYTYPE_ARRAY, etc.) is not supported.
+ * - External semaphore wait nodes and record nodes:
+ *   - Changing the number of semaphores is not supported.
+ *
+ * Note: The API may add further restrictions in future releases. The return code should always be checked.
+ *
+ * cuGraphExecUpdate sets the result member of \p resultInfo to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED
+ * under the following conditions:
+ * - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case resultInfo->errorNode
+ *   is set to NULL.
+ * - \p hGraph has more exit nodes than \p hGraphExec, in which case resultInfo->errorNode is set to one of
+ *   the exit nodes in hGraph.
+ * - A node in \p hGraph has a different number of dependencies than the node from \p hGraphExec it is paired with,
+ *   in which case resultInfo->errorNode is set to the node from \p hGraph.
+ * - A node in \p hGraph has a dependency that does not match with the corresponding dependency of the paired node
+ *   from \p hGraphExec. resultInfo->errorNode will be set to the node from \p hGraph. resultInfo->errorFromNode
+ *   will be set to the mismatched dependency. The dependencies are paired based on edge order and a dependency
+ *   does not match when the nodes are already paired based on other edges examined in the graph.
+ *
+ * cuGraphExecUpdate sets the result member of \p resultInfo to:
+ * - CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case
+ *   \p hErrorNode_out is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the function changed in an unsupported
+ *   way (see the note above), in which case \p hErrorNode_out is set to the node from \p hGraph
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way
+ *   that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED if any attributes of a node changed in a way
+ *   that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph.
+ * - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like
+ *   the node's type or configuration, in which case \p hErrorNode_out is set to the node from \p hGraph
+ *
+ * If the update fails for a reason not listed above, the result member of \p resultInfo will be set
+ * to CU_GRAPH_EXEC_UPDATE_ERROR. If the update succeeds, the result member will be set to CU_GRAPH_EXEC_UPDATE_SUCCESS.
+ *
+ * cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns
+ * CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included
+ * changes which violated constraints specific to instantiated graph update.
+ *
+ * \param hGraphExec The instantiated graph to be updated
+ * \param hGraph The graph containing the updated parameters
+ * \param resultInfo The error info structure
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE,
+ * \note_graph_thread_safety
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphInstantiate
+ */
+CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphExecUpdateResultInfo *resultInfo);
+
+/**
+ * \brief Copies attributes from source node to destination node.
+ *
+ * Copies attributes from source node \p src to destination node \p dst.
+ * Both nodes must have the same context.
+ *
+ * \param[out] dst Destination node
+ * \param[in] src Source node
+ * For a list of attributes see ::CUkernelNodeAttrID.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ *
+ * \sa
+ * ::CUaccessPolicyWindow
+ */
+CUresult CUDAAPI cuGraphKernelNodeCopyAttributes(CUgraphNode dst, CUgraphNode src);
+
+/**
+ * \brief Queries node attribute.
+ *
+ * Queries attribute \p attr from node \p hNode and stores it in the corresponding
+ * member of \p value_out.
+ *
+ * \param[in] hNode
+ * \param[in] attr
+ * \param[out] value_out
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE
+ * \notefnerr
+ *
+ * \sa
+ * ::CUaccessPolicyWindow
+ */
+CUresult CUDAAPI cuGraphKernelNodeGetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr,
+                                               CUkernelNodeAttrValue *value_out);
+
+/**
+ * \brief Sets node attribute.
+ *
+ * Sets attribute \p attr on node \p hNode from the corresponding attribute of
+ * \p value.
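+ *
+ * For illustration, a sketch that applies an access policy window to a kernel node
+ * (\p base, a \c void* pointer, and \p numBytes stand for an application-owned device
+ * address range):
+ *
+ * \code
+ * CUkernelNodeAttrValue attrValue;
+ * memset(&attrValue, 0, sizeof(attrValue));
+ * attrValue.accessPolicyWindow.base_ptr  = base;      // start of the window
+ * attrValue.accessPolicyWindow.num_bytes = numBytes;  // window size in bytes
+ * attrValue.accessPolicyWindow.hitRatio  = 0.6f;
+ * attrValue.accessPolicyWindow.hitProp   = CU_ACCESS_PROPERTY_PERSISTING;
+ * attrValue.accessPolicyWindow.missProp  = CU_ACCESS_PROPERTY_STREAMING;
+ * cuGraphKernelNodeSetAttribute(hNode, CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW, &attrValue);
+ * \endcode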
+ * + * \param[out] hNode + * \param[in] attr + * \param[out] value + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_HANDLE + * \notefnerr + * + * \sa + * ::CUaccessPolicyWindow + */ +CUresult CUDAAPI cuGraphKernelNodeSetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, + const CUkernelNodeAttrValue *value); + +/** + * \brief Write a DOT file describing graph structure + * + * Using the provided \p hGraph, write to \p path a DOT formatted description of the graph. + * By default this includes the graph topology, node types, node id, kernel names and memcpy direction. + * \p flags can be specified to write more detailed information about each node type such as + * parameter values, kernel attributes, node and function handles. + * + * \param hGraph - The graph to create a DOT file from + * \param path - The path to write the DOT file to + * \param flags - Flags from CUgraphDebugDot_flags for specifying which additional node information to write + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_OPERATING_SYSTEM + */ +CUresult CUDAAPI cuGraphDebugDotPrint(CUgraph hGraph, const char *path, unsigned int flags); + +/** + * \brief Create a user object + * + * Create a user object with the specified destructor callback and initial reference count. The + * initial references are owned by the caller. + * + * Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they + * are executed by a shared internal thread. Another thread may be signaled to perform such + * actions, if it does not block forward progress of tasks scheduled through CUDA. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object_out - Location to return the user object handle + * \param ptr - The pointer to pass to the destroy function + * \param destroy - Callback to free the user object when it is no longer in use + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. + * \param flags - Currently it is required to pass ::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectCreate(CUuserObject *object_out, void *ptr, CUhostFn destroy, + unsigned int initialRefcount, unsigned int flags); + +/** + * \brief Retain a reference to a user object + * + * Retains new references to a user object. The new references are owned by the caller. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to retain + * \param count - The number of references to retain, typically 1. Must be nonzero + * and not larger than INT_MAX. 
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectRetain(CUuserObject object, unsigned int count); + +/** + * \brief Release a reference to a user object + * + * Releases user object references owned by the caller. The object's destructor is invoked if + * the reference count reaches zero. + * + * It is undefined behavior to release references not owned by the caller, or to use a user + * object handle after all references are released. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param object - The object to release + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuGraphRetainUserObject, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuUserObjectRelease(CUuserObject object, unsigned int count); + +/** + * \brief Retain a reference to a user object from a graph + * + * Creates or moves user object references that will be owned by a CUDA graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph to associate the reference with + * \param object - The user object to retain a reference for + * \param count - The number of references to add to the graph, typically 1. Must be + * nonzero and not larger than INT_MAX. + * \param flags - The optional flag ::CU_GRAPH_USER_OBJECT_MOVE transfers references + * from the calling thread, rather than create new references. Pass 0 + * to create new references. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphReleaseUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuGraphRetainUserObject(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags); + +/** + * \brief Release a user object reference from a graph + * + * Releases user object references owned by a graph. + * + * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. + * + * \param graph - The graph that will release the reference + * \param object - The user object to release a reference for + * \param count - The number of references to release, typically 1. Must be nonzero + * and not larger than INT_MAX. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuUserObjectCreate, + * ::cuUserObjectRetain, + * ::cuUserObjectRelease, + * ::cuGraphRetainUserObject, + * ::cuGraphCreate + */ +CUresult CUDAAPI cuGraphReleaseUserObject(CUgraph graph, CUuserObject object, unsigned int count); + +/** @} */ /* END CUDA_GRAPH */ + +/** + * \defgroup CUDA_OCCUPANCY Occupancy + * + * ___MANBRIEF___ occupancy calculation functions of the low-level CUDA driver + * API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the occupancy calculation functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns occupancy of a function + * + * Returns in \p *numBlocks the number of the maximum active blocks per + * streaming multiprocessor. 
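+ *
+ * For illustration, a minimal sketch (assuming \p kernel is a loaded ::CUfunction that
+ * uses no dynamic shared memory):
+ *
+ * \code
+ * int numBlocks = 0;
+ * int blockSize = 256;
+ * cuOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel, blockSize, 0);
+ * // numBlocks now holds the maximum number of co-resident blocks per SM at this block size
+ * \endcode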
+ *
+ * \param numBlocks - Returned occupancy
+ * \param func - Kernel for which occupancy is calculated
+ * \param blockSize - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor
+ */
+CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize);
+
+/**
+ * \brief Returns occupancy of a function
+ *
+ * Returns in \p *numBlocks the number of the maximum active blocks per
+ * streaming multiprocessor.
+ *
+ * The \p Flags parameter controls how special cases are handled. The
+ * valid flags are:
+ *
+ * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
+ *   ::cuOccupancyMaxActiveBlocksPerMultiprocessor;
+ *
+ * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
+ *   default behavior on platforms where global caching affects
+ *   occupancy. On such platforms, if caching is enabled, but
+ *   per-block SM resource usage would result in zero occupancy, the
+ *   occupancy calculator will calculate the occupancy as if caching
+ *   is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes
+ *   the occupancy calculator return 0 in such cases. More information
+ *   can be found about this feature in the "Unified L1/Texture Cache"
+ *   section of the Maxwell tuning guide.
+ *
+ * \param numBlocks - Returned occupancy
+ * \param func - Kernel for which occupancy is calculated
+ * \param blockSize - Block size the kernel is intended to be launched with
+ * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes
+ * \param flags - Requested behavior for the occupancy calculator
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
+ */
+CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags);
+
+/**
+ * \brief Suggest a launch configuration with reasonable occupancy
+ *
+ * Returns in \p *blockSize a reasonable block size that can achieve
+ * the maximum occupancy (or, the maximum number of active warps with
+ * the fewest blocks per multiprocessor), and in \p *minGridSize the
+ * minimum grid size to achieve the maximum occupancy.
+ *
+ * If \p blockSizeLimit is 0, the configurator will use the maximum
+ * block size permitted by the device / function instead.
+ *
+ * If per-block dynamic shared memory allocation is not needed, the
+ * user should leave both \p blockSizeToDynamicSMemSize and \p
+ * dynamicSMemSize as 0.
+ *
+ * If per-block dynamic shared memory allocation is needed, then if
+ * the dynamic shared memory size is constant regardless of block
+ * size, the size should be passed through \p dynamicSMemSize, and \p
+ * blockSizeToDynamicSMemSize should be NULL.
+ *
+ * Otherwise, if the per-block dynamic shared memory size varies with
+ * different block sizes, the user needs to provide a unary function
+ * through \p blockSizeToDynamicSMemSize that computes the dynamic
+ * shared memory needed by \p func for any given block size. \p
+ * dynamicSMemSize is ignored. An example signature is:
+ *
+ * \code
+ * // Take block size, returns dynamic shared memory needed
+ * size_t blockToSmem(int blockSize);
+ * \endcode
+ *
+ * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy
+ * \param blockSize - Returned maximum block size that can achieve the maximum occupancy
+ * \param func - Kernel for which launch configuration is calculated
+ * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size
+ * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes
+ * \param blockSizeLimit - The maximum block size \p func is designed to handle
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cudaOccupancyMaxPotentialBlockSize
+ */
+CUresult CUDAAPI cuOccupancyMaxPotentialBlockSize(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit);
+
+/**
+ * \brief Suggest a launch configuration with reasonable occupancy
+ *
+ * An extended version of ::cuOccupancyMaxPotentialBlockSize. In
+ * addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize,
+ * ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \p Flags
+ * parameter.
+ *
+ * The \p Flags parameter controls how special cases are handled. The
+ * valid flags are:
+ *
+ * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as
+ *   ::cuOccupancyMaxPotentialBlockSize;
+ *
+ * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the
+ *   default behavior on platforms where global caching affects
+ *   occupancy. On such platforms, the launch configuration that
+ *   produces maximal occupancy might not support global
+ *   caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE
+ *   guarantees that the produced launch configuration is global
+ *   caching compatible at a potential cost of occupancy. More information
+ *   can be found about this feature in the "Unified L1/Texture Cache"
+ *   section of the Maxwell tuning guide.
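+ *
+ * For illustration, a brief sketch (assuming \p kernel is a loaded ::CUfunction with no
+ * dynamic shared memory requirement and no block size limit):
+ *
+ * \code
+ * int minGridSize = 0, blockSize = 0;
+ * cuOccupancyMaxPotentialBlockSizeWithFlags(&minGridSize, &blockSize, kernel,
+ *                                           NULL, 0, 0, CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE);
+ * // blockSize is the suggested block size; size the grid from the problem size accordingly
+ * \endcode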
+ * + * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy + * \param blockSize - Returned maximum block size that can achieve the maximum occupancy + * \param func - Kernel for which launch configuration is calculated + * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size + * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to handle + * \param flags - Options + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaOccupancyMaxPotentialBlockSizeWithFlags + */ +CUresult CUDAAPI cuOccupancyMaxPotentialBlockSizeWithFlags(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags); + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the blocks + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + */ +CUresult CUDAAPI cuOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize); + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum cluster size in \p *clusterSize. + * + * The cluster dimensions in \p config are ignored. If func has a required + * cluster size set (see ::cudaFuncGetAttributes / ::cuFuncGetAttribute),\p + * *clusterSize will reflect the required cluster size. + * + * By default this function will always return a value that's portable on + * future hardware. A higher value may be returned if the kernel function + * allows non-portable cluster sizes. + * + * This function will respect the compile time launch bounds. + * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaFuncGetAttributes, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuOccupancyMaxPotentialClusterSize(int *clusterSize, CUfunction func, const CUlaunchConfig *config); + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. 
+ * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes / ::cuFuncGetAttribute), the cluster size + * from config must either be unspecified or match the required size. + * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_INVALID_CLUSTER_SIZE, + * ::CUDA_ERROR_UNKNOWN + * \notefnerr + * + * \sa + * ::cudaFuncGetAttributes, + * ::cuFuncGetAttribute + */ +CUresult CUDAAPI cuOccupancyMaxActiveClusters(int *numClusters, CUfunction func, const CUlaunchConfig *config); +/** @} */ /* END CUDA_OCCUPANCY */ + +/** + * \defgroup CUDA_TEXREF_DEPRECATED Texture Reference Management [DEPRECATED] + * + * ___MANBRIEF___ deprecated texture reference management functions of the + * low-level CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the deprecated texture reference management + * functions of the low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Binds an array as a texture reference + * + * \deprecated + * + * Binds the CUDA array \p hArray to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. \p Flags must be set to + * ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \p hTexRef is + * unbound. + * + * \param hTexRef - Texture reference to bind + * \param hArray - Array to bind + * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetArray(CUtexref hTexRef, CUarray hArray, unsigned int Flags); + +/** + * \brief Binds a mipmapped array to a texture reference + * + * \deprecated + * + * Binds the CUDA mipmapped array \p hMipmappedArray to the texture reference \p hTexRef. + * Any previous address or CUDA array state associated with the texture reference + * is superseded by this function. \p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT. + * Any CUDA array previously bound to \p hTexRef is unbound. 
+ * + * \param hTexRef - Texture reference to bind + * \param hMipmappedArray - Mipmapped array to bind + * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmappedArray(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags); + +/** + * \brief Binds an address as a texture reference + * + * \deprecated + * + * Binds a linear address range to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. Any memory previously bound to \p hTexRef + * is unbound. + * + * Since the hardware enforces an alignment requirement on texture base + * addresses, ::cuTexRefSetAddress() passes back a byte offset in + * \p *ByteOffset that must be applied to texture fetches in order to read from + * the desired memory. This offset must be divided by the texel size and + * passed to kernels that read from the texture so they can be applied to the + * ::tex1Dfetch() function. + * + * If the device memory pointer was returned from ::cuMemAlloc(), the offset + * is guaranteed to be 0 and NULL may be passed as the \p ByteOffset parameter. + * + * The total number of elements (or texels) in the linear address range + * cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. + * The number of elements is computed as (\p bytes / bytesPerElement), + * where bytesPerElement is determined from the data format and number of + * components set using ::cuTexRefSetFormat(). + * + * \param ByteOffset - Returned byte offset + * \param hTexRef - Texture reference to bind + * \param dptr - Device pointer to bind + * \param bytes - Size of memory to bind in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr dptr, size_t bytes); + +/** + * \brief Binds an address as a 2D texture reference + * + * \deprecated + * + * Binds a linear address range to the texture reference \p hTexRef. Any + * previous address or CUDA array state associated with the texture reference + * is superseded by this function. Any memory previously bound to \p hTexRef + * is unbound. + * + * Using a ::tex2D() function inside a kernel requires a call to either + * ::cuTexRefSetArray() to bind the corresponding texture reference to an + * array, or ::cuTexRefSetAddress2D() to bind the texture reference to linear + * memory. + * + * Function calls to ::cuTexRefSetFormat() cannot follow calls to + * ::cuTexRefSetAddress2D() for the same texture reference. 
+ * + * It is required that \p dptr be aligned to the appropriate hardware-specific + * texture alignment. You can query this value using the device attribute + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. If an unaligned \p dptr is + * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + * + * \p Pitch has to be aligned to the hardware-specific texture pitch alignment. + * This value can be queried using the device attribute + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. If an unaligned \p Pitch is + * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. + * + * Width and Height, which are specified in elements (or texels), cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + * \p Pitch, which is specified in bytes, cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + * + * \param hTexRef - Texture reference to bind + * \param desc - Descriptor of CUDA array + * \param dptr - Device pointer to bind + * \param Pitch - Line pitch in bytes + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); + +/** + * \brief Sets the format for a texture reference + * + * \deprecated + * + * Specifies the format of the data to be read by the texture reference + * \p hTexRef. \p fmt and \p NumPackedComponents are exactly analogous to the + * ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure: + * They specify the format of each component and the number of components per + * array element. + * + * \param hTexRef - Texture reference + * \param fmt - Format to set + * \param NumPackedComponents - Number of components per array element + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, + * ::cudaCreateChannelDesc + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFormat(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents); + +/** + * \brief Sets the addressing mode for a texture reference + * + * \deprecated + * + * Specifies the addressing mode \p am for the given dimension \p dim of the + * texture reference \p hTexRef. If \p dim is zero, the addressing mode is + * applied to the first parameter of the functions used to fetch from the + * texture; if \p dim is 1, the second, and so on. 
::CUaddress_mode is defined + * as: + * \code + typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, + CU_TR_ADDRESS_MODE_CLAMP = 1, + CU_TR_ADDRESS_MODE_MIRROR = 2, + CU_TR_ADDRESS_MODE_BORDER = 3 + } CUaddress_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. + * Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only + * supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + * + * \param hTexRef - Texture reference + * \param dim - Dimension + * \param am - Addressing mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddressMode(CUtexref hTexRef, int dim, CUaddress_mode am); + +/** + * \brief Sets the filtering mode for a texture reference + * + * \deprecated + * + * Specifies the filtering mode \p fm to be used when reading memory through + * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + * + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. + * + * \param hTexRef - Texture reference + * \param fm - Filtering mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFilterMode(CUtexref hTexRef, CUfilter_mode fm); + +/** + * \brief Sets the mipmap filtering mode for a texture reference + * + * \deprecated + * + * Specifies the mipmap filtering mode \p fm to be used when reading memory through + * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: + * + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. 
+ * + * \param hTexRef - Texture reference + * \param fm - Filtering mode to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapFilterMode(CUtexref hTexRef, CUfilter_mode fm); + +/** + * \brief Sets the mipmap level bias for a texture reference + * + * \deprecated + * + * Specifies the mipmap level bias \p bias to be added to the specified mipmap level when + * reading memory through the texture reference \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + * + * \param hTexRef - Texture reference + * \param bias - Mipmap level bias + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelBias(CUtexref hTexRef, float bias); + +/** + * \brief Sets the mipmap min/max mipmap level clamps for a texture reference + * + * \deprecated + * + * Specifies the min/max mipmap level clamps, \p minMipmapLevelClamp and \p maxMipmapLevelClamp + * respectively, to be used when reading memory through the texture reference + * \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. + * + * \param hTexRef - Texture reference + * \param minMipmapLevelClamp - Mipmap min level clamp + * \param maxMipmapLevelClamp - Mipmap max level clamp + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelClamp(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp); + +/** + * \brief Sets the maximum anisotropy for a texture reference + * + * \deprecated + * + * Specifies the maximum anisotropy \p maxAniso to be used when reading memory through + * the texture reference \p hTexRef. + * + * Note that this call has no effect if \p hTexRef is bound to linear memory. 
+ * + * \param hTexRef - Texture reference + * \param maxAniso - Maximum anisotropy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMaxAnisotropy(CUtexref hTexRef, unsigned int maxAniso); + +/** + * \brief Sets the border color for a texture reference + * + * \deprecated + * + * Specifies the value of the RGBA color via the \p pBorderColor to the texture reference + * \p hTexRef. The color value supports only float type and holds color components in + * the following sequence: + * pBorderColor[0] holds 'R' component + * pBorderColor[1] holds 'G' component + * pBorderColor[2] holds 'B' component + * pBorderColor[3] holds 'A' component + * + * Note that the color values can be set only when the Address mode is set to + * CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode. + * Applications using integer border color values have to "reinterpret_cast" their values to float. + * + * \param hTexRef - Texture reference + * \param pBorderColor - RGBA color + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddressMode, + * ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetBorderColor(CUtexref hTexRef, float *pBorderColor); + +/** + * \brief Sets the flags for a texture reference + * + * \deprecated + * + * Specifies optional flags via \p Flags to specify the behavior of data + * returned through the texture reference \p hTexRef. The valid flags are: + * + * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + * having the texture promote integer data to floating point data in the + * range [0, 1]. Note that texture with 32-bit integer format + * would not be promoted, regardless of whether or not this + * flag is specified; + * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the + * default behavior of having the texture coordinates range + * from [0, Dim) where Dim is the width or height of the CUDA + * array. Instead, the texture coordinates [0, 1.0) reference + * the entire breadth of the array dimension; + * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + * filtering optimizations. Trilinear optimizations improve texture filtering + * performance by allowing bilinear filtering on textures in scenarios where + * it can closely approximate the expected results. 
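+ *
+ * A minimal usage sketch (the variable name below is an assumption; error
+ * checking omitted):
+ * \code
+    // Hypothetical sketch: read integer data without promotion, using
+    // normalized coordinates, through an already-configured reference texRef.
+    cuTexRefSetFlags(texRef, CU_TRSF_READ_AS_INTEGER | CU_TRSF_NORMALIZED_COORDINATES);
+ * \endcode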
+ * + * \param hTexRef - Texture reference + * \param Flags - Optional flags to set + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFlags(CUtexref hTexRef, unsigned int Flags); + +/** + * \brief Gets the address associated with a texture reference + * + * \deprecated + * + * Returns in \p *pdptr the base address bound to the texture reference + * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any device memory range. + * + * \param pdptr - Returned device address + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr *pdptr, CUtexref hTexRef); + +/** + * \brief Gets the array bound to a texture reference + * + * \deprecated + * + * Returns in \p *phArray the CUDA array bound to the texture reference + * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any CUDA array. + * + * \param phArray - Returned array + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetArray(CUarray *phArray, CUtexref hTexRef); + +/** + * \brief Gets the mipmapped array bound to a texture reference + * + * \deprecated + * + * Returns in \p *phMipmappedArray the CUDA mipmapped array bound to the texture + * reference \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference + * is not bound to any CUDA mipmapped array. 
+ * + * \param phMipmappedArray - Returned mipmapped array + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmappedArray(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef); + +/** + * \brief Gets the addressing mode used by a texture reference + * + * \deprecated + * + * Returns in \p *pam the addressing mode corresponding to the + * dimension \p dim of the texture reference \p hTexRef. Currently, the only + * valid value for \p dim are 0 and 1. + * + * \param pam - Returned addressing mode + * \param hTexRef - Texture reference + * \param dim - Dimension + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddressMode(CUaddress_mode *pam, CUtexref hTexRef, int dim); + +/** + * \brief Gets the filter-mode used by a texture reference + * + * \deprecated + * + * Returns in \p *pfm the filtering mode of the texture reference + * \p hTexRef. + * + * \param pfm - Returned filtering mode + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); + +/** + * \brief Gets the format used by a texture reference + * + * \deprecated + * + * Returns in \p *pFormat and \p *pNumChannels the format and number + * of components of the CUDA array bound to the texture reference \p hTexRef. + * If \p pFormat or \p pNumChannels is NULL, it will be ignored. 
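+ *
+ * For illustration only (texRef is an assumed, valid texture reference), the
+ * format and channel count of a bound array could be queried back as:
+ * \code
+    CUarray_format fmt;
+    int numChannels;
+    // Hypothetical sketch; error checking omitted.
+    cuTexRefGetFormat(&fmt, &numChannels, texRef);
+ * \endcode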
+ * + * \param pFormat - Returned format + * \param pNumChannels - Returned number of components + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFormat(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef); + +/** + * \brief Gets the mipmap filtering mode for a texture reference + * + * \deprecated + * + * Returns the mipmap filtering mode in \p pfm that's used when reading memory through + * the texture reference \p hTexRef. + * + * \param pfm - Returned mipmap filtering mode + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); + +/** + * \brief Gets the mipmap level bias for a texture reference + * + * \deprecated + * + * Returns the mipmap level bias in \p pBias that's added to the specified mipmap + * level when reading memory through the texture reference \p hTexRef. + * + * \param pbias - Returned mipmap level bias + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelBias(float *pbias, CUtexref hTexRef); + +/** + * \brief Gets the min/max mipmap level clamps for a texture reference + * + * \deprecated + * + * Returns the min/max mipmap level clamps in \p pminMipmapLevelClamp and \p pmaxMipmapLevelClamp + * that's used when reading memory through the texture reference \p hTexRef. 
+ * + * \param pminMipmapLevelClamp - Returned mipmap min level clamp + * \param pmaxMipmapLevelClamp - Returned mipmap max level clamp + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelClamp(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef); + +/** + * \brief Gets the maximum anisotropy for a texture reference + * + * \deprecated + * + * Returns the maximum anisotropy in \p pmaxAniso that's used when reading memory through + * the texture reference \p hTexRef. + * + * \param pmaxAniso - Returned maximum anisotropy + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMaxAnisotropy(int *pmaxAniso, CUtexref hTexRef); + +/** + * \brief Gets the border color used by a texture reference + * + * \deprecated + * + * Returns in \p pBorderColor, values of the RGBA color used by + * the texture reference \p hTexRef. + * The color value is of type float and holds color components in + * the following sequence: + * pBorderColor[0] holds 'R' component + * pBorderColor[1] holds 'G' component + * pBorderColor[2] holds 'B' component + * pBorderColor[3] holds 'A' component + * + * \param hTexRef - Texture reference + * \param pBorderColor - Returned Type and Value of RGBA color + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddressMode, + * ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetBorderColor(float *pBorderColor, CUtexref hTexRef); + +/** + * \brief Gets the flags used by a texture reference + * + * \deprecated + * + * Returns in \p *pFlags the flags of the texture reference \p hTexRef. + * + * \param pFlags - Returned flags + * \param hTexRef - Texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefSetAddress, + * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, + * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, + * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, + * ::cuTexRefGetFilterMode, ::cuTexRefGetFormat + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFlags(unsigned int *pFlags, CUtexref hTexRef); + +/** + * \brief Creates a texture reference + * + * \deprecated + * + * Creates a texture reference and returns its handle in \p *pTexRef. 
Once + * created, the application must call ::cuTexRefSetArray() or + * ::cuTexRefSetAddress() to associate the reference with allocated memory. + * Other texture reference functions are used to specify the format and + * interpretation (addressing, filtering, etc.) to be used when the memory is + * read through this texture reference. + * + * \param pTexRef - Returned texture reference + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefDestroy + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefCreate(CUtexref *pTexRef); + +/** + * \brief Destroys a texture reference + * + * \deprecated + * + * Destroys the texture reference specified by \p hTexRef. + * + * \param hTexRef - Texture reference to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuTexRefCreate + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefDestroy(CUtexref hTexRef); + +/** @} */ /* END CUDA_TEXREF_DEPRECATED */ + + +/** + * \defgroup CUDA_SURFREF_DEPRECATED Surface Reference Management [DEPRECATED] + * + * ___MANBRIEF___ surface reference management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the surface reference management functions of the + * low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Sets the CUDA array for a surface reference. + * + * \deprecated + * + * Sets the CUDA array \p hArray to be read and written by the surface reference + * \p hSurfRef. Any previous CUDA array state associated with the surface + * reference is superseded by this function. \p Flags must be set to 0. + * The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array. + * Any CUDA array previously bound to \p hSurfRef is unbound. + + * \param hSurfRef - Surface reference handle + * \param hArray - CUDA array handle + * \param Flags - set to 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuModuleGetSurfRef, + * ::cuSurfRefGetArray + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefSetArray(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags); + +/** + * \brief Passes back the CUDA array bound to a surface reference. + * + * \deprecated + * + * Returns in \p *phArray the CUDA array bound to the surface reference + * \p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference + * is not bound to any CUDA array. 
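+ *
+ * As a non-normative sketch (the module handle hMod, the array hArray and the
+ * surface name are assumptions), a surface reference is typically looked up
+ * from a module, bound to a CUDA array, and can then be queried back:
+ * \code
+    CUsurfref surfRef;
+    CUarray boundArray;
+    // Hypothetical sketch: "mySurf" is assumed to name a surface in hMod, and
+    // hArray was created with the CUDA_ARRAY3D_SURFACE_LDST flag.
+    cuModuleGetSurfRef(&surfRef, hMod, "mySurf");
+    cuSurfRefSetArray(surfRef, hArray, 0);
+    cuSurfRefGetArray(&boundArray, surfRef);   // boundArray == hArray
+ * \endcode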
+ + * \param phArray - Surface reference handle + * \param hSurfRef - Surface reference handle + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray + */ +__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefGetArray(CUarray *phArray, CUsurfref hSurfRef); + +/** @} */ /* END CUDA_SURFREF_DEPRECATED */ + +/** + * \defgroup CUDA_TEXOBJECT Texture Object Management + * + * ___MANBRIEF___ texture object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the texture object management functions of the + * low-level CUDA driver application programming interface. The texture + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a texture object + * + * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes + * the data to texture from. \p pTexDesc describes how the data should be sampled. + * \p pResViewDesc is an optional argument that specifies an alternate format for + * the data described by \p pResDesc, and also describes the subresource region + * to restrict access to when texturing. \p pResViewDesc can only be specified if + * the type of resource is a CUDA array or a CUDA mipmapped array. + * + * Texture objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a texture object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The ::CUDA_RESOURCE_DESC structure is defined as: + * \code + typedef struct CUDA_RESOURCE_DESC_st + { + CUresourcetype resType; + + union { + struct { + CUarray hArray; + } array; + struct { + CUmipmappedArray hMipmappedArray; + } mipmap; + struct { + CUdeviceptr devPtr; + CUarray_format format; + unsigned int numChannels; + size_t sizeInBytes; + } linear; + struct { + CUdeviceptr devPtr; + CUarray_format format; + unsigned int numChannels; + size_t width; + size_t height; + size_t pitchInBytes; + } pitch2D; + } res; + + unsigned int flags; + } CUDA_RESOURCE_DESC; + + * \endcode + * where: + * - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from. + * CUresourceType is defined as: + * \code + typedef enum CUresourcetype_enum { + CU_RESOURCE_TYPE_ARRAY = 0x00, + CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, + CU_RESOURCE_TYPE_LINEAR = 0x02, + CU_RESOURCE_TYPE_PITCH2D = 0x03 + } CUresourcetype; + * \endcode + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray + * must be set to a valid CUDA array handle. + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray + * must be set to a valid CUDA mipmapped array handle. + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr + * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + * ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels + * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::linear::sizeInBytes + * specifies the size of the array in bytes. 
The total number of elements in the linear address range cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)). + * + * \par + * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr + * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. + * ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels + * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width + * and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed + * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. + * ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to + * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. + * + * - ::flags must be set to zero. + * + * + * The ::CUDA_TEXTURE_DESC struct is defined as + * \code + typedef struct CUDA_TEXTURE_DESC_st { + CUaddress_mode addressMode[3]; + CUfilter_mode filterMode; + unsigned int flags; + unsigned int maxAnisotropy; + CUfilter_mode mipmapFilterMode; + float mipmapLevelBias; + float minMipmapLevelClamp; + float maxMipmapLevelClamp; + } CUDA_TEXTURE_DESC; + * \endcode + * where + * - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as: + * \code + typedef enum CUaddress_mode_enum { + CU_TR_ADDRESS_MODE_WRAP = 0, + CU_TR_ADDRESS_MODE_CLAMP = 1, + CU_TR_ADDRESS_MODE_MIRROR = 2, + CU_TR_ADDRESS_MODE_BORDER = 3 + } CUaddress_mode; + * \endcode + * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES + * is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. + * + * - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as: + * \code + typedef enum CUfilter_mode_enum { + CU_TR_FILTER_MODE_POINT = 0, + CU_TR_FILTER_MODE_LINEAR = 1 + } CUfilter_mode; + * \endcode + * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. + * + * - ::CUDA_TEXTURE_DESC::flags can be any combination of the following: + * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of + * having the texture promote integer data to floating point data in the + * range [0, 1]. Note that texture with 32-bit integer format would not be + * promoted, regardless of whether or not this flag is specified. + * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior + * of having the texture coordinates range from [0, Dim) where Dim is the + * width or height of the CUDA array. Instead, the texture coordinates + * [0, 1.0) reference the entire breadth of the array dimension; Note that + * for CUDA mipmapped arrays, this flag has to be set. + * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear + * filtering optimizations. Trilinear optimizations improve texture filtering + * performance by allowing bilinear filtering on textures in scenarios where + * it can closely approximate the expected results. 
+ * - ::CU_TRSF_SEAMLESS_CUBEMAP, which enables seamless cube map filtering. + * This flag can only be specified if the underlying resource is a CUDA array + * or a CUDA mipmapped array that was created with the flag ::CUDA_ARRAY3D_CUBEMAP. + * When seamless cube map filtering is enabled, texture address modes specified + * by ::CUDA_TEXTURE_DESC::addressMode are ignored. Instead, if the ::CUDA_TEXTURE_DESC::filterMode + * is set to ::CU_TR_FILTER_MODE_POINT the address mode ::CU_TR_ADDRESS_MODE_CLAMP + * will be applied for all dimensions. If the ::CUDA_TEXTURE_DESC::filterMode is + * set to ::CU_TR_FILTER_MODE_LINEAR seamless cube map filtering will be performed + * when sampling along the cube face borders. + * + * - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be + * clamped to the range [1,16]. + * + * - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels. + * + * - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level. + * + * - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to. + * + * - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to. + * + * + * The ::CUDA_RESOURCE_VIEW_DESC struct is defined as + * \code + typedef struct CUDA_RESOURCE_VIEW_DESC_st + { + CUresourceViewFormat format; + size_t width; + size_t height; + size_t depth; + unsigned int firstMipmapLevel; + unsigned int lastMipmapLevel; + unsigned int firstLayer; + unsigned int lastLayer; + } CUDA_RESOURCE_VIEW_DESC; + * \endcode + * where: + * - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should + * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block + * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base of format ::CU_AD_FORMAT_UNSIGNED_INT32. + * with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have + * a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base + * format but with 4 channels. + * + * - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block + * compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats, + * this value has to be equal to that of the original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the + * original resource. + * + * - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. 
+ * For non-mipmapped resources, this value has to be zero.::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp + * will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, + * then the actual minimum mipmap level clamp will be 3.2. + * + * - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value + * has to be zero. + * + * - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero. + * For non-layered resources, this value has to be zero. + * + * - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources, + * this value has to be zero. + * + * + * \param pTexObject - Texture object to create + * \param pResDesc - Resource descriptor + * \param pTexDesc - Texture descriptor + * \param pResViewDesc - Resource view descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectDestroy, + * ::cudaCreateTextureObject + */ +CUresult CUDAAPI cuTexObjectCreate(CUtexObject *pTexObject, const CUDA_RESOURCE_DESC *pResDesc, const CUDA_TEXTURE_DESC *pTexDesc, const CUDA_RESOURCE_VIEW_DESC *pResViewDesc); + +/** + * \brief Destroys a texture object + * + * Destroys the texture object specified by \p texObject. + * + * \param texObject - Texture object to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaDestroyTextureObject + */ +CUresult CUDAAPI cuTexObjectDestroy(CUtexObject texObject); + +/** + * \brief Returns a texture object's resource descriptor + * + * Returns the resource descriptor for the texture object specified by \p texObject. + * + * \param pResDesc - Resource descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectResourceDesc, + */ +CUresult CUDAAPI cuTexObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUtexObject texObject); + +/** + * \brief Returns a texture object's texture descriptor + * + * Returns the texture descriptor for the texture object specified by \p texObject. + * + * \param pTexDesc - Texture descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectTextureDesc + */ +CUresult CUDAAPI cuTexObjectGetTextureDesc(CUDA_TEXTURE_DESC *pTexDesc, CUtexObject texObject); + +/** + * \brief Returns a texture object's resource view descriptor + * + * Returns the resource view descriptor for the texture object specified by \p texObject. + * If no resource view was set for \p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned. 
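+ *
+ * The following is an illustrative sketch only (the device pointer \p dptr and
+ * element count \p numElems are assumptions; error checking omitted). It
+ * creates a texture object over linear device memory, reads its descriptors
+ * back, and destroys it:
+ * \code
+    // Hypothetical sketch: dptr points to numElems floats in device memory.
+    CUDA_RESOURCE_DESC resDesc;
+    memset(&resDesc, 0, sizeof(resDesc));          // also zeroes the required flags field
+    resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
+    resDesc.res.linear.devPtr = dptr;
+    resDesc.res.linear.format = CU_AD_FORMAT_FLOAT;
+    resDesc.res.linear.numChannels = 1;
+    resDesc.res.linear.sizeInBytes = numElems * sizeof(float);
+
+    CUDA_TEXTURE_DESC texDesc;
+    memset(&texDesc, 0, sizeof(texDesc));
+    texDesc.filterMode = CU_TR_FILTER_MODE_POINT;  // addressMode is ignored for CU_RESOURCE_TYPE_LINEAR
+
+    CUtexObject texObj;
+    cuTexObjectCreate(&texObj, &resDesc, &texDesc, NULL);   // no resource view supplied
+
+    // Descriptors can be read back; a resource view query would return
+    // CUDA_ERROR_INVALID_VALUE here because no view was provided at creation.
+    CUDA_RESOURCE_DESC queriedRes;
+    CUDA_TEXTURE_DESC  queriedTex;
+    cuTexObjectGetResourceDesc(&queriedRes, texObj);
+    cuTexObjectGetTextureDesc(&queriedTex, texObj);
+
+    cuTexObjectDestroy(texObj);
+ * \endcode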
+ * + * \param pResViewDesc - Resource view descriptor + * \param texObject - Texture object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTexObjectCreate, + * ::cudaGetTextureObjectResourceViewDesc + */ +CUresult CUDAAPI cuTexObjectGetResourceViewDesc(CUDA_RESOURCE_VIEW_DESC *pResViewDesc, CUtexObject texObject); + +/** @} */ /* END CUDA_TEXOBJECT */ + +/** + * \defgroup CUDA_SURFOBJECT Surface Object Management + * + * ___MANBRIEF___ surface object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the surface object management functions of the + * low-level CUDA driver application programming interface. The surface + * object API is only supported on devices of compute capability 3.0 or higher. + * + * @{ + */ + +/** + * \brief Creates a surface object + * + * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes + * the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be + * ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray + * must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero. + * + * Surface objects are only supported on devices of compute capability 3.0 or higher. + * Additionally, a surface object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param pSurfObject - Surface object to create + * \param pResDesc - Resource descriptor + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectDestroy, + * ::cudaCreateSurfaceObject + */ +CUresult CUDAAPI cuSurfObjectCreate(CUsurfObject *pSurfObject, const CUDA_RESOURCE_DESC *pResDesc); + +/** + * \brief Destroys a surface object + * + * Destroys the surface object specified by \p surfObject. + * + * \param surfObject - Surface object to destroy + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectCreate, + * ::cudaDestroySurfaceObject + */ +CUresult CUDAAPI cuSurfObjectDestroy(CUsurfObject surfObject); + +/** + * \brief Returns a surface object's resource descriptor + * + * Returns the resource descriptor for the surface object specified by \p surfObject. + * + * \param pResDesc - Resource descriptor + * \param surfObject - Surface object + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuSurfObjectCreate, + * ::cudaGetSurfaceObjectResourceDesc + */ +CUresult CUDAAPI cuSurfObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUsurfObject surfObject); + +/** @} */ /* END CUDA_SURFOBJECT */ + +/** + * \defgroup CUDA_TENSOR_MEMORY Tensor Map Object Managment + * + * ___MANBRIEF___ tensor map object management functions of the low-level CUDA + * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the tensor map object management functions of the + * low-level CUDA driver application programming interface. The tensor + * core API is only supported on devices of compute capability 9.0 or higher. 
+ * + * @{ + */ + +/** + * \brief Create a tensor map descriptor object representing tiled memory region + * + * Creates a descriptor for Tensor Memory Access (TMA) object specified + * by the parameters describing a tiled region and returns it in \p tensorMap. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The parameters passed are bound to the following requirements: + * + * - \p tensorMap address must be aligned to 64 bytes. + * + * - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + * \code + typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte + CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes + } CUtensorMapDataType; + * \endcode + * + * - \p tensorRank must be non-zero and less than or equal to the maximum supported dimensionality of 5. If \p interleave is not + * ::CU_TENSOR_MAP_INTERLEAVE_NONE, then \p tensorRank must additionally be greater than or equal to 3. + * + * - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + * ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + * + * - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + * equal to 2^32. + * + * - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + * multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. + * Each following dimension specified includes previous dimension stride: + * \code + globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; + for (i = 1; i < tensorRank - 1; i++) + globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); + assert(globalStrides[i] >= globalDim[i]); + * \endcode + * + * - \p boxDim array, which specifies number of elements to be traversed along each of the \p tensorRank dimensions, must be non-zero + * and less than or equal to 256. + * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, { \p boxDim[0] * elementSizeInBytes( \p tensorDataType ) } must be a multiple + * of 16 bytes. + * + * - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + * than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + * TMA doesn’t support the stride for dimension zero. + * When all elemets of \p elementStrides array is one, \p boxDim specifies the number of elements to load. However, if the \p elementStrides[i] + * is not equal to one, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. 
To load N elements along + * i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i]. + * + * - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as: + * \code + typedef enum CUtensorMapInterleave_enum { + CU_TENSOR_MAP_INTERLEAVE_NONE = 0, + CU_TENSOR_MAP_INTERLEAVE_16B, + CU_TENSOR_MAP_INTERLEAVE_32B + } CUtensorMapInterleave; + * \endcode + * TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 byte per channel or NC/16HWC16 where C16 + * uses 32 bytes. + * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension + * (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size. + * - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32. + * - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64. + * - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128. + * + * - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as: + * \code + typedef enum CUtensorMapSwizzle_enum { + CU_TENSOR_MAP_SWIZZLE_NONE = 0, + CU_TENSOR_MAP_SWIZZLE_32B, + CU_TENSOR_MAP_SWIZZLE_64B, + CU_TENSOR_MAP_SWIZZLE_128B + } CUtensorMapSwizzle; + * \endcode + * Data is organized in specific order in global memory; however, it may not match the order in which data are accessed by application in + * the shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this + * problem, data can be loaded to shard memory with shuffling across shared memory banks. + * Note that it’s expected that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle should be ::CU_TENSOR_MAP_SWIZZLE_32B mode. + * Other interleave modes can have any swizzling patterns. + * + * - \p l2Promotion specifies L2 fetch size which indicates the byte granurality at which L2 requests is filled from DRAM. It must be of + * type ::CUtensorMapL2promotion, which is defined as: + * \code + typedef enum CUtensorMapL2promotion_enum { + CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, + CU_TENSOR_MAP_L2_PROMOTION_L2_64B, + CU_TENSOR_MAP_L2_PROMOTION_L2_128B, + CU_TENSOR_MAP_L2_PROMOTION_L2_256B + } CUtensorMapL2promotion; + * \endcode + * + * - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type + * ::CUtensorMapFloatOOBfill which is defined as: + * \code + typedef enum CUtensorMapFloatOOBfill_enum { + CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, + CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA + } CUtensorMapFloatOOBfill; + * \endcode + * Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating data type. 
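+ *
+ * A minimal, illustrative sketch (the base address and extents below are
+ * assumptions chosen to satisfy the constraints above, e.g. gmemWidth is
+ * assumed to be a multiple of 8 so the row pitch is a multiple of 16 bytes;
+ * error checking omitted):
+ * \code
+    // Hypothetical sketch: describe 64x64-element tiles of a row-major 2-D
+    // float16 tensor of gmemWidth x gmemHeight elements starting at basePtr.
+    CUtensorMap tensorMap;                        // must be 64-byte aligned
+    cuuint64_t globalDim[2]      = { gmemWidth, gmemHeight };
+    cuuint64_t globalStrides[1]  = { gmemWidth * 2 };   // row pitch in bytes (fp16)
+    cuuint32_t boxDim[2]         = { 64, 64 };    // 64 * 2 bytes = 128-byte inner dimension
+    cuuint32_t elementStrides[2] = { 1, 1 };
+
+    cuTensorMapEncodeTiled(&tensorMap,
+                           CU_TENSOR_MAP_DATA_TYPE_FLOAT16,
+                           2,                     // tensorRank
+                           basePtr,               // 16-byte aligned device address
+                           globalDim,
+                           globalStrides,
+                           boxDim,
+                           elementStrides,
+                           CU_TENSOR_MAP_INTERLEAVE_NONE,
+                           CU_TENSOR_MAP_SWIZZLE_128B,        // inner dimension (128 bytes) <= 128
+                           CU_TENSOR_MAP_L2_PROMOTION_L2_128B,
+                           CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE);
+ * \endcode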
+ * + * \param tensorMap - Tensor map object to create + * \param tensorDataType - Tensor data type + * \param tensorRank - Dimensionality of tensor + * \param globalAddress - Starting address of memory region described by tensor + * \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + * \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + * \param boxDim - Array containing traversal box size (number of elments) along each of the \p tensorRank dimensions. Specifies how many elements to be traversed along each tensor dimension. + * \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + * \param interleave - Type of interleaved layout the tensor addresses + * \param swizzle - Bank swizzling pattern inside shared memory + * \param l2Promotion - L2 promotion size + * \param oobFill - Indicate whether zero or special NaN constant must be used to fill out-of-bound elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeIm2col, + * ::cuTensorMapReplaceAddress + */ +CUresult CUDAAPI cuTensorMapEncodeTiled(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const cuuint32_t *boxDim, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); + + +/** + * \brief Create a tensor map descriptor object representing im2col memory region + * + * Creates a descriptor for Tensor Memory Access (TMA) object specified + * by the parameters describing a im2col memory layout and returns it in \p tensorMap. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * The parameters passed are bound to the following requirements: + * + * - \p tensorMap address must be aligned to 64 bytes. + * + * - \p tensorDataType has to be an enum from ::CUtensorMapDataType which is defined as: + * \code + typedef enum CUtensorMapDataType_enum { + CU_TENSOR_MAP_DATA_TYPE_UINT8 = 0, // 1 byte + CU_TENSOR_MAP_DATA_TYPE_UINT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_INT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_UINT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_INT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT64, // 8 bytes + CU_TENSOR_MAP_DATA_TYPE_BFLOAT16, // 2 bytes + CU_TENSOR_MAP_DATA_TYPE_FLOAT32_FTZ, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32, // 4 bytes + CU_TENSOR_MAP_DATA_TYPE_TFLOAT32_FTZ // 4 bytes + } CUtensorMapDataType; + * \endcode + * + * - \p tensorRank must be one of dimensions 3, 4, or 5. + * + * - \p globalAddress, which specifies the starting address of the memory region described, must be 32 byte aligned when \p interleave is + * ::CU_TENSOR_MAP_INTERLEAVE_32B and 16 byte aligned otherwise. + * + * - \p globalDim array, which specifies tensor size of each of the \p tensorRank dimensions, must be non-zero and less than or + * equal to 2^32. 
+ * + * - \p globalStrides array, which specifies tensor stride of each of the lower \p tensorRank - 1 dimensions in bytes, must be a + * multiple of 16 and less than 2^40. Additionally, the stride must be a multiple of 32 when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B. + * Each following dimension specified includes previous dimension stride: + * \code + globalStrides[0] = globalDim[0] * elementSizeInBytes(tensorDataType) + padding[0]; + for (i = 1; i < tensorRank - 1; i++) + globalStrides[i] = globalStrides[i – 1] * (globalDim[i] + padding[i]); + assert(globalStrides[i] >= globalDim[i]); + * \endcode + * + * - \p pixelBoxLowerCorner array specifies the coordinate offsets {D, H, W} of the bounding box from top/left/front corner. The number of + * offsets and their precision depends on the tensor dimensionality: + * - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + * - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + * - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + * + * - \p pixelBoxUpperCorner array specifies the coordinate offsets {D, H, W} of the bounding box from bottom/right/back corner. The number of + * offsets and their precision depends on the tensor dimensionality: + * - When \p tensorRank is 3, one signed offset within range [-32768, 32767] is supported. + * - When \p tensorRank is 4, two signed offsets each within range [-128, 127] are supported. + * - When \p tensorRank is 5, three offsets each within range [-16, 15] are supported. + * The bounding box specified by \p pixelBoxLowerCorner and \p pixelBoxUpperCorner must have non-zero area. + * + * - \p channelsPerPixel, which specifies the number of elements which must be accessed along C dimension, must be less than or equal to 256. + * + * - \p pixelsPerColumn, which specifies the number of elements that must be accessed along the {N, D, H, W} dimensions, must be less than or + * equal to 1024. + * + * - \p elementStrides array, which specifies the iteration step along each of the \p tensorRank dimensions, must be non-zero and less + * than or equal to 8. Note that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE, the first element of this array is ignored since + * TMA doesn’t support the stride for dimension zero. + * When all elemets of \p elementStrides array is one, \p boxDim specifies the number of elements to load. However, if the \p elementStrides[i] + * is not equal to one, then TMA loads ceil( \p boxDim[i] / \p elementStrides[i]) number of elements along i-th dimension. To load N elements along + * i-th dimension, \p boxDim[i] must be set to N * \p elementStrides[i]. + * + * - \p interleave specifies the interleaved layout of type ::CUtensorMapInterleave, which is defined as: + * \code + typedef enum CUtensorMapInterleave_enum { + CU_TENSOR_MAP_INTERLEAVE_NONE = 0, + CU_TENSOR_MAP_INTERLEAVE_16B, + CU_TENSOR_MAP_INTERLEAVE_32B + } CUtensorMapInterleave; + * \endcode + * TMA supports interleaved layouts like NC/8HWC8 where C8 utilizes 16 bytes in memory assuming 2 byte per channel or NC/16HWC16 where C16 + * uses 32 bytes. + * When \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_NONE and \p swizzle is not ::CU_TENSOR_MAP_SWIZZLE_NONE, the bounding box inner dimension + * (computed as \p boxDim[0] multiplied by element size derived from \p tensorDataType) must be less than or equal to the swizzle size. + * - CU_TENSOR_MAP_SWIZZLE_32B implies the bounding box inner dimension will be <= 32. 
+ * - CU_TENSOR_MAP_SWIZZLE_64B implies the bounding box inner dimension will be <= 64. + * - CU_TENSOR_MAP_SWIZZLE_128B implies the bounding box inner dimension will be <= 128. + * + * - \p swizzle, which specifies the shared memory bank swizzling pattern, has to be of type ::CUtensorMapSwizzle which is defined as: + * \code + typedef enum CUtensorMapSwizzle_enum { + CU_TENSOR_MAP_SWIZZLE_NONE = 0, + CU_TENSOR_MAP_SWIZZLE_32B, + CU_TENSOR_MAP_SWIZZLE_64B, + CU_TENSOR_MAP_SWIZZLE_128B + } CUtensorMapSwizzle; + * \endcode + * Data is organized in specific order in global memory; however, it may not match the order in which data are accessed by application in + * the shared memory. This difference in data organization may cause bank conflicts when shared memory is accessed. In order to avoid this + * problem, data can be loaded to shard memory with shuffling across shared memory banks. + * Note that it’s expected that when \p interleave is ::CU_TENSOR_MAP_INTERLEAVE_32B, \p swizzle should be ::CU_TENSOR_MAP_SWIZZLE_32B mode. + * Other interleave modes can have any swizzling patterns. + * + * - \p l2Promotion specifies L2 fetch size which indicates the byte granurality at which L2 requests is filled from DRAM. It must be of + * type ::CUtensorMapL2promotion, which is defined as: + * \code + typedef enum CUtensorMapL2promotion_enum { + CU_TENSOR_MAP_L2_PROMOTION_NONE = 0, + CU_TENSOR_MAP_L2_PROMOTION_L2_64B, + CU_TENSOR_MAP_L2_PROMOTION_L2_128B, + CU_TENSOR_MAP_L2_PROMOTION_L2_256B + } CUtensorMapL2promotion; + * \endcode + * + * - \p oobFill, which indicates whether zero or a special NaN constant should be used to fill out-of-bound elements, must be of type + * ::CUtensorMapFloatOOBfill which is defined as: + * \code + typedef enum CUtensorMapFloatOOBfill_enum { + CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE = 0, + CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA + } CUtensorMapFloatOOBfill; + * \endcode + * Note that ::CU_TENSOR_MAP_FLOAT_OOB_FILL_NAN_REQUEST_ZERO_FMA can only be used when \p tensorDataType represents a floating data type. 
+ * + * \param tensorMap - Tensor map object to create + * \param tensorDataType - Tensor data type + * \param tensorRank - Dimensionality of tensor, needs to be at least of dimension 3 + * \param globalAddress - Starting address of memory region described by tensor + * \param globalDim - Array containing tensor size (number of elements) along each of the \p tensorRank dimensions + * \param globalStrides - Array containing stride size (in bytes) along each of the \p tensorRank - 1 dimensions + * \param pixelBoxLowerCorner - Array containing DHW dimentions of lower box corner + * \param pixelBoxUpperCorner - Array containing DHW dimentions of upper box corner + * \param channelsPerPixel - Number of channels per pixel + * \param pixelsPerColumn - Number of pixels per column + * \param elementStrides - Array containing traversal stride in each of the \p tensorRank dimensions + * \param interleave - Type of interleaved layout the tensor addresses + * \param swizzle - Bank swizzling pattern inside shared memory + * \param l2Promotion - L2 promotion size + * \param oobFill - Indicate whether zero or special NaN constant must be used to fill out-of-bound elements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeTiled, + * ::cuTensorMapReplaceAddress + */ +CUresult CUDAAPI cuTensorMapEncodeIm2col(CUtensorMap *tensorMap, CUtensorMapDataType tensorDataType, cuuint32_t tensorRank, void *globalAddress, const cuuint64_t *globalDim, const cuuint64_t *globalStrides, const int *pixelBoxLowerCorner, const int *pixelBoxUpperCorner, cuuint32_t channelsPerPixel, cuuint32_t pixelsPerColumn, const cuuint32_t *elementStrides, CUtensorMapInterleave interleave, CUtensorMapSwizzle swizzle, CUtensorMapL2promotion l2Promotion, CUtensorMapFloatOOBfill oobFill); + +/** + * \brief Modify an existing tensor map descriptor with an updated global address + * + * Modifies the descriptor for Tensor Memory Access (TMA) object passed in \p tensorMap with + * an updated \p globalAddress. + * + * Tensor map objects are only supported on devices of compute capability 9.0 or higher. + * Additionally, a tensor map object is an opaque value, and, as such, should only be + * accessed through CUDA API calls. + * + * \param tensorMap - Tensor map object to modify + * \param globalAddress - Starting address of memory region described by tensor, must follow previous alignment requirements + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_VALUE + * + * \sa + * ::cuTensorMapEncodeTiled, + * ::cuTensorMapEncodeIm2col + */ +CUresult CUDAAPI cuTensorMapReplaceAddress(CUtensorMap *tensorMap, void *globalAddress); + +/** @} */ +/* END CUDA_TENSOR_MEMORY */ + +/** + * \defgroup CUDA_PEER_ACCESS Peer Context Memory Access + * + * ___MANBRIEF___ direct peer context memory access functions of the low-level + * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the direct peer context memory access functions + * of the low-level CUDA driver application programming interface. + * + * @{ + */ + +/** + * \brief Queries if a device may directly access a peer device's memory. + * + * Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of + * directly accessing memory from contexts on \p peerDev and 0 otherwise. 
+ * If direct access of \p peerDev from \p dev is possible, then access may be + * enabled on two specific contexts by calling ::cuCtxEnablePeerAccess(). + * + * \param canAccessPeer - Returned access capability + * \param dev - Device from which allocations on \p peerDev are to + * be directly accessed. + * \param peerDev - Device on which the allocations to be directly accessed + * by \p dev reside. + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_DEVICE + * \notefnerr + * + * \sa + * ::cuCtxEnablePeerAccess, + * ::cuCtxDisablePeerAccess, + * ::cudaDeviceCanAccessPeer + */ +CUresult CUDAAPI cuDeviceCanAccessPeer(int *canAccessPeer, CUdevice dev, CUdevice peerDev); + +/** + * \brief Enables direct access to memory allocations in a peer context. + * + * If both the current context and \p peerContext are on devices which support unified + * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same + * major compute capability, then on success all allocations from \p peerContext will + * immediately be accessible by the current context. See \ref CUDA_UNIFIED for additional + * details. + * + * Note that access granted by this call is unidirectional and that in order to access + * memory from the current context in \p peerContext, a separate symmetric call + * to ::cuCtxEnablePeerAccess() is required. + * + * Note that there are both device-wide and system-wide limitations per system + * configuration, as noted in the CUDA Programming Guide under the section + * "Peer-to-Peer Memory Access". + * + * Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates + * that the ::CUdevice of the current context cannot directly access memory + * from the ::CUdevice of \p peerContext. + * + * Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of + * \p peerContext from the current context has already been enabled. + * + * Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible + * because hardware resources required for peer access have been exhausted. + * + * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \p peerContext + * is not a valid context, or if the current context is \p peerContext. + * + * Returns ::CUDA_ERROR_INVALID_VALUE if \p Flags is not 0. + * + * \param peerContext - Peer context to enable direct access to from the current context + * \param Flags - Reserved for future use and must be set to 0 + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, + * ::CUDA_ERROR_TOO_MANY_PEERS, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, + * ::CUDA_ERROR_INVALID_VALUE + * \notefnerr + * + * \sa + * ::cuDeviceCanAccessPeer, + * ::cuCtxDisablePeerAccess, + * ::cudaDeviceEnablePeerAccess + */ +CUresult CUDAAPI cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int Flags); + +/** + * \brief Disables direct access to memory allocations in a peer context and + * unregisters any registered allocations. + * + Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has + * not yet been enabled from \p peerContext to the current context. + * + * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if + * \p peerContext is not a valid context. 
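+ *
+ * Illustrative sketch only (the device handles devA/devB and the peer context
+ * ctxB are assumed to have been created elsewhere; error checking omitted):
+ * \code
+    // Hypothetical sketch: the current context runs on devA and wants to map
+    // allocations owned by ctxB, which resides on devB.
+    int canAccess = 0;
+    cuDeviceCanAccessPeer(&canAccess, devA, devB);
+    if (canAccess) {
+        cuCtxEnablePeerAccess(ctxB, 0);      // grants current context access to ctxB
+        // ... use allocations from ctxB in work launched from the current context ...
+        cuCtxDisablePeerAccess(ctxB);        // tear the mapping down again
+    }
+ * \endcode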
+ *
+ * \param peerContext - Peer context to disable direct access to
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED,
+ * ::CUDA_ERROR_INVALID_CONTEXT
+ * \notefnerr
+ *
+ * \sa
+ * ::cuDeviceCanAccessPeer,
+ * ::cuCtxEnablePeerAccess,
+ * ::cudaDeviceDisablePeerAccess
+ */
+CUresult CUDAAPI cuCtxDisablePeerAccess(CUcontext peerContext);
+
+/**
+ * \brief Queries attributes of the link between two devices.
+ *
+ * Returns in \p *value the value of the requested attribute \p attrib of the
+ * link between \p srcDevice and \p dstDevice. The supported attributes are:
+ * - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the
+ * performance of the link between two devices.
+ * - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED: 1 if P2P Access is enabled.
+ * - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over
+ * the link are supported.
+ * - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can
+ * be accessed over the link.
+ *
+ * Returns ::CUDA_ERROR_INVALID_DEVICE if \p srcDevice or \p dstDevice are not valid
+ * or if they represent the same device.
+ *
+ * Returns ::CUDA_ERROR_INVALID_VALUE if \p attrib is not valid or if \p value is
+ * a null pointer.
+ *
+ * \param value - Returned value of the requested attribute
+ * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice.
+ * \param srcDevice - The source device of the target link.
+ * \param dstDevice - The destination device of the target link.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_DEVICE,
+ * ::CUDA_ERROR_INVALID_VALUE
+ * \notefnerr
+ *
+ * \sa
+ * ::cuCtxEnablePeerAccess,
+ * ::cuCtxDisablePeerAccess,
+ * ::cuDeviceCanAccessPeer,
+ * ::cudaDeviceGetP2PAttribute
+ */
+CUresult CUDAAPI cuDeviceGetP2PAttribute(int* value, CUdevice_P2PAttribute attrib, CUdevice srcDevice, CUdevice dstDevice);
+
+/** @} */ /* END CUDA_PEER_ACCESS */
+
+/**
+ * \defgroup CUDA_GRAPHICS Graphics Interoperability
+ *
+ * ___MANBRIEF___ graphics interoperability functions of the low-level CUDA
+ * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the graphics interoperability functions of the
+ * low-level CUDA driver application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * \brief Unregisters a graphics resource for access by CUDA
+ *
+ * Unregisters the graphics resource \p resource so it is not accessible by
+ * CUDA unless registered again.
+ *
+ * If \p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is
+ * returned.
+ *
+ * \param resource - Resource to unregister
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_UNKNOWN
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphicsD3D9RegisterResource,
+ * ::cuGraphicsD3D10RegisterResource,
+ * ::cuGraphicsD3D11RegisterResource,
+ * ::cuGraphicsGLRegisterBuffer,
+ * ::cuGraphicsGLRegisterImage,
+ * ::cudaGraphicsUnregisterResource
+ */
+CUresult CUDAAPI cuGraphicsUnregisterResource(CUgraphicsResource resource);
+
+/**
+ * \brief Get an array through which to access a subresource of a mapped graphics resource.
+ *
+ * Returns in \p *pArray an array through which the subresource of the mapped
+ * graphics resource \p resource which corresponds to array index \p arrayIndex
+ * and mipmap level \p mipLevel may be accessed. The value set in \p *pArray may
+ * change every time that \p resource is mapped.
+ *
+ * If \p resource is not a texture then it cannot be accessed via an array and
+ * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned.
+ * If \p arrayIndex is not a valid array index for \p resource then
+ * ::CUDA_ERROR_INVALID_VALUE is returned.
+ * If \p mipLevel is not a valid mipmap level for \p resource then
+ * ::CUDA_ERROR_INVALID_VALUE is returned.
+ * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
+ *
+ * \param pArray - Returned array through which a subresource of \p resource may be accessed
+ * \param resource - Mapped resource to access
+ * \param arrayIndex - Array index for array textures or cubemap face
+ * index as defined by ::CUarray_cubemap_face for
+ * cubemap textures for the subresource to access
+ * \param mipLevel - Mipmap level for the subresource to access
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_NOT_MAPPED,
+ * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphicsResourceGetMappedPointer,
+ * ::cudaGraphicsSubResourceGetMappedArray
+ */
+CUresult CUDAAPI cuGraphicsSubResourceGetMappedArray(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel);
+
+/**
+ * \brief Get a mipmapped array through which to access a mapped graphics resource.
+ *
+ * Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics
+ * resource \p resource may be accessed. The value set in \p *pMipmappedArray may change every time
+ * that \p resource is mapped.
+ *
+ * If \p resource is not a texture then it cannot be accessed via a mipmapped array and
+ * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned.
+ * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
+ *
+ * \param pMipmappedArray - Returned mipmapped array through which \p resource may be accessed
+ * \param resource - Mapped resource to access
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_NOT_MAPPED,
+ * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphicsResourceGetMappedPointer,
+ * ::cudaGraphicsResourceGetMappedMipmappedArray
+ */
+CUresult CUDAAPI cuGraphicsResourceGetMappedMipmappedArray(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource);
+
+/**
+ * \brief Get a device pointer through which to access a mapped graphics resource.
+ *
+ * Returns in \p *pDevPtr a pointer through which the mapped graphics resource
+ * \p resource may be accessed.
+ * Returns in \p pSize the size of the memory in bytes which may be accessed from that pointer.
+ * The value set in \p *pDevPtr may change every time that \p resource is mapped.
+ *
+ * If \p resource is not a buffer then it cannot be accessed via a pointer and
+ * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned.
+ * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned.
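+ *
+ * A minimal sketch (assuming \p resource is a registered buffer that has already been
+ * mapped with ::cuGraphicsMapResources; error checking is omitted):
+ * \code
+ * CUdeviceptr dptr = 0;
+ * size_t bytes = 0;
+ * cuGraphicsResourceGetMappedPointer(&dptr, &bytes, resource);
+ * // dptr and bytes are only valid while resource remains mapped
+ * \endcode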
+ *
+ * \param pDevPtr - Returned pointer through which \p resource may be accessed
+ * \param pSize - Returned size of the buffer accessible starting at \p *pDevPtr
+ * \param resource - Mapped resource to access
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_NOT_MAPPED,
+ * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphicsMapResources,
+ * ::cuGraphicsSubResourceGetMappedArray,
+ * ::cudaGraphicsResourceGetMappedPointer
+ */
+CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr *pDevPtr, size_t *pSize, CUgraphicsResource resource);
+
+/**
+ * \brief Set usage flags for mapping a graphics resource
+ *
+ * Set \p flags for mapping the graphics resource \p resource.
+ *
+ * Changes to \p flags will take effect the next time \p resource is mapped.
+ * The \p flags argument may be any of the following:
+ *
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this
+ * resource will be used. It is therefore assumed that this resource will be
+ * read from and written to by CUDA kernels. This is the default value.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which
+ * access this resource will not write to this resource.
+ * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels
+ * which access this resource will not read from this resource and will
+ * write over the entire contents of the resource, so none of the data
+ * previously stored in the resource will be preserved.
+ *
+ * If \p resource is presently mapped for access by CUDA then
+ * ::CUDA_ERROR_ALREADY_MAPPED is returned.
+ * If \p flags is not one of the above values then ::CUDA_ERROR_INVALID_VALUE is returned.
+ *
+ * \param resource - Registered resource to set flags for
+ * \param flags - Parameters for resource mapping
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_INVALID_HANDLE,
+ * ::CUDA_ERROR_ALREADY_MAPPED
+ * \notefnerr
+ *
+ * \sa
+ * ::cuGraphicsMapResources,
+ * ::cudaGraphicsResourceSetMapFlags
+ */
+CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags);
+
+/**
+ * \brief Map graphics resources for access by CUDA
+ *
+ * Maps the \p count graphics resources in \p resources for access by CUDA.
+ *
+ * The resources in \p resources may be accessed by CUDA until they
+ * are unmapped. The graphics API from which \p resources were registered
+ * should not access any resources while they are mapped by CUDA. If an
+ * application does so, the results are undefined.
+ *
+ * This function provides the synchronization guarantee that any graphics calls
+ * issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA
+ * work issued in \p stream begins.
+ *
+ * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned.
+ * If any of \p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned.
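+ *
+ * An illustrative sketch of the map/access/unmap pattern (the registered resource
+ * \c resource and the stream \c hStream are assumed to already exist; error checking
+ * is omitted):
+ * \code
+ * CUgraphicsResource res[1] = { resource };
+ * cuGraphicsMapResources(1, res, hStream);    // pending graphics work completes first
+ * CUdeviceptr dptr = 0;
+ * size_t bytes = 0;
+ * cuGraphicsResourceGetMappedPointer(&dptr, &bytes, res[0]);
+ * // ... launch CUDA work on hStream that reads or writes dptr ...
+ * cuGraphicsUnmapResources(1, res, hStream);  // CUDA work on hStream completes before graphics reuse
+ * \endcode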
+ * + * \param count - Number of resources to map + * \param resources - Resources to map for CUDA usage + * \param hStream - Stream with which to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_ALREADY_MAPPED, + * ::CUDA_ERROR_UNKNOWN + * \note_null_stream + * \notefnerr + * + * \sa + * ::cuGraphicsResourceGetMappedPointer, + * ::cuGraphicsSubResourceGetMappedArray, + * ::cuGraphicsUnmapResources, + * ::cudaGraphicsMapResources + */ +CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + +/** + * \brief Unmap graphics resources. + * + * Unmaps the \p count graphics resources in \p resources. + * + * Once unmapped, the resources in \p resources may not be accessed by CUDA + * until they are mapped again. + * + * This function provides the synchronization guarantee that any CUDA work issued + * in \p stream before ::cuGraphicsUnmapResources() will complete before any + * subsequently issued graphics work begins. + * + * + * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. + * If any of \p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned. + * + * \param count - Number of resources to unmap + * \param resources - Resources to unmap + * \param hStream - Stream with which to synchronize + * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_DEINITIALIZED, + * ::CUDA_ERROR_NOT_INITIALIZED, + * ::CUDA_ERROR_INVALID_CONTEXT, + * ::CUDA_ERROR_INVALID_HANDLE, + * ::CUDA_ERROR_NOT_MAPPED, + * ::CUDA_ERROR_UNKNOWN + * \note_null_stream + * \notefnerr + * + * \sa + * ::cuGraphicsMapResources, + * ::cudaGraphicsUnmapResources + */ +CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + +/** @} */ /* END CUDA_GRAPHICS */ + +/** + * \defgroup CUDA_DRIVER_ENTRY_POINT Driver Entry Point Access + * + * ___MANBRIEF___ driver entry point access functions of the low-level CUDA driver API + * (___CURRENT_FILE___) ___ENDMANBRIEF___ + * + * This section describes the driver entry point access functions of the low-level CUDA + * driver application programming interface. + * + * @{ + */ + +/** + * \brief Returns the requested driver API function pointer + * + * Returns in \p **pfn the address of the CUDA driver function for the requested + * CUDA version and flags. + * + * The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 + * should be specified as 11020. For a requested driver symbol, if the specified + * CUDA version is greater than or equal to the CUDA version in which the driver symbol + * was introduced, this API will return the function pointer to the corresponding + * versioned function. + * + * The pointer returned by the API should be cast to a function pointer matching the + * requested driver function's definition in the API header file. The function pointer + * typedef can be picked up from the corresponding typedefs header file. For example, + * cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. + * + * The API will return ::CUDA_SUCCESS and set the returned \p pfn to NULL if the + * requested driver function is not supported on the platform, no ABI + * compatible driver function exists for the specified \p cudaVersion or if the + * driver symbol is invalid. 
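+ *
+ * A minimal sketch of looking up and calling a driver symbol (the requested version
+ * 12000 and the hand-written function-pointer type are illustrative assumptions; the
+ * typedefs provided by cudaTypedefs.h are normally preferred):
+ * \code
+ * CUresult (*pfnMemAlloc)(CUdeviceptr *, size_t) = NULL;
+ * CUdriverProcAddressQueryResult status;
+ * cuGetProcAddress("cuMemAlloc", (void **)&pfnMemAlloc, 12000,
+ *                  CU_GET_PROC_ADDRESS_DEFAULT, &status);
+ * if (pfnMemAlloc != NULL && status == CU_GET_PROC_ADDRESS_SUCCESS) {
+ *     CUdeviceptr dptr;
+ *     pfnMemAlloc(&dptr, 256);   // calls the versioned cuMemAlloc implementation
+ * }
+ * \endcode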
+ *
+ * It will also set the optional \p symbolStatus to one of the values in
+ * ::CUdriverProcAddressQueryResult with the following meanings:
+ * - ::CU_GET_PROC_ADDRESS_SUCCESS - The requested symbol was successfully found based
+ * on input arguments and \p pfn is valid
+ * - ::CU_GET_PROC_ADDRESS_SYMBOL_NOT_FOUND - The requested symbol was not found
+ * - ::CU_GET_PROC_ADDRESS_VERSION_NOT_SUFFICIENT - The requested symbol was found but is
+ * not supported by the specified \p cudaVersion
+ *
+ * The requested flags can be:
+ * - ::CU_GET_PROC_ADDRESS_DEFAULT: This is the default mode. This is equivalent to
+ * ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM if the code is compiled with
+ * --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM
+ * is defined; ::CU_GET_PROC_ADDRESS_LEGACY_STREAM otherwise.
+ * - ::CU_GET_PROC_ADDRESS_LEGACY_STREAM: This will enable the search for all driver symbols
+ * that match the requested driver symbol name except the corresponding per-thread versions.
+ * - ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: This will enable the search for all
+ * driver symbols that match the requested driver symbol name including the per-thread
+ * versions. If a per-thread version is not found, the API will return the legacy version
+ * of the driver function.
+ *
+ * \param symbol - The base name of the driver API function to look for. As an example,
+ * for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc and
+ * \p cudaVersion would be the ABI compatible CUDA version for the _v2 variant.
+ * \param pfn - Location to return the function pointer to the requested driver function
+ * \param cudaVersion - The CUDA version to look for the requested driver symbol
+ * \param flags - Flags to specify search options.
+ * \param symbolStatus - Optional location to store the status of the search for
+ * \p symbol based on \p cudaVersion. See ::CUdriverProcAddressQueryResult
+ * for possible values.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_NOT_SUPPORTED
+ * \note_version_mixing
+ *
+ * \sa
+ * ::cudaGetDriverEntryPoint
+ */
+CUresult CUDAAPI cuGetProcAddress(const char *symbol, void **pfn, int cudaVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolStatus);
+
+/** @} */ /* END CUDA_DRIVER_ENTRY_POINT */
+
+
+/**
+ * \defgroup CUDA_COREDUMP Coredump Attributes Control API
+ *
+ * ___MANBRIEF___ coredump attribute control functions for the low-level CUDA API
+ * (___CURRENT_FILE___) ___ENDMANBRIEF___
+ *
+ * This section describes the coredump attribute control functions of the low-level CUDA
+ * driver application programming interface.
+ *
+ * @{
+ */
+
+/**
+ * Flags for choosing a coredump attribute to get/set
+ */
+typedef enum CUcoredumpSettings_enum {
+    CU_COREDUMP_ENABLE_ON_EXCEPTION = 1,
+    CU_COREDUMP_TRIGGER_HOST,
+    CU_COREDUMP_LIGHTWEIGHT,
+    CU_COREDUMP_ENABLE_USER_TRIGGER,
+    CU_COREDUMP_FILE,
+    CU_COREDUMP_PIPE,
+    CU_COREDUMP_MAX
+} CUcoredumpSettings;
+
+/**
+ * \brief Allows caller to fetch a coredump attribute value for the current context
+ *
+ * Returns in \p *value the requested value specified by \p attrib. It is up to the caller
+ * to ensure that the data type and size of \p *value matches the request.
+ *
+ * If the caller calls this function with \p *value equal to NULL, the size of the memory
+ * region (in bytes) expected for \p attrib will be placed in \p size.
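+ *
+ * A minimal sketch of the two-step size query and fetch for the string-valued
+ * ::CU_COREDUMP_FILE attribute described below (error checking and the eventual
+ * free() are omitted):
+ * \code
+ * size_t size = 0;
+ * cuCoredumpGetAttribute(CU_COREDUMP_FILE, NULL, &size);   // query the required size
+ * char *path = (char *)malloc(size);
+ * cuCoredumpGetAttribute(CU_COREDUMP_FILE, path, &size);   // fetch the current value
+ * \endcode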
+ *
+ * The supported attributes are:
+ * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from
+ * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE.
+ * The default value is ::false unless set to ::true globally or locally, or the
+ * CU_CTX_USER_COREDUMP_ENABLE flag was set during context creation.
+ * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will
+ * also create a coredump. The default value is ::true unless set to ::false globally
+ * or locally.
+ * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps
+ * will not have a dump of GPU memory or non-reloc ELF images. The default value is
+ * ::false unless set to ::true globally or locally.
+ * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be
+ * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default
+ * value is ::false unless set to ::true globally or locally.
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe
+ * that will be monitored if user-triggered coredumps are enabled. The default value is
+ * ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to fetch.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_NOT_PERMITTED,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_CONTEXT_IS_DESTROYED
+ *
+ * \sa
+ * ::cuCoredumpGetAttributeGlobal,
+ * ::cuCoredumpSetAttribute,
+ * ::cuCoredumpSetAttributeGlobal
+ */
+CUresult CUDAAPI cuCoredumpGetAttribute(CUcoredumpSettings attrib, void* value, size_t *size);
+
+/**
+ * \brief Allows caller to fetch a coredump attribute value for the entire application
+ *
+ * Returns in \p *value the requested value specified by \p attrib. It is up to the caller
+ * to ensure that the data type and size of \p *value matches the request.
+ *
+ * If the caller calls this function with \p *value equal to NULL, the size of the memory
+ * region (in bytes) expected for \p attrib will be placed in \p size.
+ *
+ * The supported attributes are:
+ * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from
+ * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE.
+ * The default value is ::false.
+ * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will
+ * also create a coredump. The default value is ::true.
+ * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps
+ * will not have a dump of GPU memory or non-reloc ELF images. The default value is
+ * ::false.
+ * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be
+ * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default
+ * value is ::false.
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe
+ * that will be monitored if user-triggered coredumps are enabled. The default value is
+ * ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to fetch.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE
+ *
+ * \sa
+ * ::cuCoredumpGetAttribute,
+ * ::cuCoredumpSetAttribute,
+ * ::cuCoredumpSetAttributeGlobal
+ */
+CUresult CUDAAPI cuCoredumpGetAttributeGlobal(CUcoredumpSettings attrib, void *value, size_t *size);
+
+/**
+ * \brief Allows caller to set a coredump attribute value for the current context
+ *
+ * This function should be considered an alternate interface to the CUDA-GDB environment
+ * variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump
+ *
+ * An important design decision to note is that any coredump environment variable values
+ * set before CUDA initializes will take permanent precedence over any values set with
+ * this function. This decision was made to ensure no change in behavior for any users that
+ * may be currently using these variables to get coredumps.
+ *
+ * \p *value shall contain the requested value specified by \p attrib. It is up to the caller
+ * to ensure that the data type and size of \p *value matches the request.
+ *
+ * If the caller calls this function with \p *value equal to NULL, the size of the memory
+ * region (in bytes) expected for \p attrib will be placed in \p size.
+ *
+ * \note This function will return ::CUDA_ERROR_NOT_SUPPORTED if the caller attempts to set
+ * ::CU_COREDUMP_ENABLE_ON_EXCEPTION on a GPU with Compute Capability < 6.0. ::cuCoredumpSetAttributeGlobal
+ * works on those platforms as an alternative.
+ *
+ * \note ::CU_COREDUMP_ENABLE_USER_TRIGGER and ::CU_COREDUMP_PIPE cannot be set on a per-context basis.
+ *
+ * The supported attributes are:
+ * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from
+ * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE.
+ * The default value is ::false.
+ * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will
+ * also create a coredump. The default value is ::true.
+ * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps
+ * will not have a dump of GPU memory or non-reloc ELF images. The default value is
+ * ::false.
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to set.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to.
+ *
+ * \return
+ * ::CUDA_SUCCESS,
+ * ::CUDA_ERROR_INVALID_VALUE,
+ * ::CUDA_ERROR_NOT_PERMITTED,
+ * ::CUDA_ERROR_DEINITIALIZED,
+ * ::CUDA_ERROR_NOT_INITIALIZED,
+ * ::CUDA_ERROR_INVALID_CONTEXT,
+ * ::CUDA_ERROR_CONTEXT_IS_DESTROYED,
+ * ::CUDA_ERROR_NOT_SUPPORTED
+ *
+ * \sa
+ * ::cuCoredumpGetAttributeGlobal,
+ * ::cuCoredumpGetAttribute,
+ * ::cuCoredumpSetAttributeGlobal
+ */
+CUresult CUDAAPI cuCoredumpSetAttribute(CUcoredumpSettings attrib, void* value, size_t *size);
+
+/**
+ * \brief Allows caller to set a coredump attribute value globally
+ *
+ * This function should be considered an alternate interface to the CUDA-GDB environment
+ * variables defined in this document: https://docs.nvidia.com/cuda/cuda-gdb/index.html#gpu-coredump
+ *
+ * An important design decision to note is that any coredump environment variable values
+ * set before CUDA initializes will take permanent precedence over any values set with
+ * this function. This decision was made to ensure no change in behavior for any users that
+ * may be currently using these variables to get coredumps.
+ *
+ * \p *value shall contain the requested value specified by \p attrib. It is up to the caller
+ * to ensure that the data type and size of \p *value matches the request.
+ *
+ * If the caller calls this function with \p *value equal to NULL, the size of the memory
+ * region (in bytes) expected for \p attrib will be placed in \p size.
+ *
+ * The supported attributes are:
+ * - ::CU_COREDUMP_ENABLE_ON_EXCEPTION: Bool where ::true means that GPU exceptions from
+ * this context will create a coredump at the location specified by ::CU_COREDUMP_FILE.
+ * The default value is ::false.
+ * - ::CU_COREDUMP_TRIGGER_HOST: Bool where ::true means that the host CPU will
+ * also create a coredump. The default value is ::true.
+ * - ::CU_COREDUMP_LIGHTWEIGHT: Bool where ::true means that any resulting coredumps
+ * will not have a dump of GPU memory or non-reloc ELF images. The default value is
+ * ::false.
+ * - ::CU_COREDUMP_ENABLE_USER_TRIGGER: Bool where ::true means that a coredump can be
+ * created by writing to the system pipe specified by ::CU_COREDUMP_PIPE. The default
+ * value is ::false.
+ * - ::CU_COREDUMP_FILE: String of up to 1023 characters that defines the location where
+ * any coredumps generated by this context will be written. The default value is
+ * ::core.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine running
+ * the CUDA application and ::PID is the process ID of the CUDA application.
+ * - ::CU_COREDUMP_PIPE: String of up to 1023 characters that defines the name of the pipe
+ * that will be monitored if user-triggered coredumps are enabled. This value may not be
+ * changed after ::CU_COREDUMP_ENABLE_USER_TRIGGER is set to ::true. The default
+ * value is ::corepipe.cuda.HOSTNAME.PID where ::HOSTNAME is the host name of the machine
+ * running the CUDA application and ::PID is the process ID of the CUDA application.
+ *
+ * \param attrib - The enum defining which value to set.
+ * \param value - void* containing the requested data.
+ * \param size - The size of the memory region \p value points to.
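+ *
+ * A minimal sketch of enabling exception-triggered coredumps for the whole application
+ * (illustrative only; error checking is omitted):
+ * \code
+ * bool enable = true;
+ * size_t size = sizeof(enable);
+ * cuCoredumpSetAttributeGlobal(CU_COREDUMP_ENABLE_ON_EXCEPTION, &enable, &size);
+ * \endcode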
+ * + * \return + * ::CUDA_SUCCESS, + * ::CUDA_ERROR_INVALID_VALUE, + * ::CUDA_ERROR_NOT_PERMITTED + * + * \sa + * ::cuCoredumpGetAttribute, + * ::cuCoredumpGetAttributeGlobal, + * ::cuCoredumpSetAttribute + */ +CUresult CUDAAPI cuCoredumpSetAttributeGlobal(CUcoredumpSettings attrib, void *value, size_t *size); + +/** @} */ /* END CUDA_COREDUMP */ + +CUresult CUDAAPI cuGetExportTable(const void **ppExportTable, const CUuuid *pExportTableId); + +/** + * CUDA API versioning support + */ +#if defined(__CUDA_API_VERSION_INTERNAL) + #undef cuMemHostRegister + #undef cuGraphicsResourceSetMapFlags + #undef cuLinkCreate + #undef cuLinkAddData + #undef cuLinkAddFile + #undef cuDeviceTotalMem + #undef cuCtxCreate + #undef cuModuleGetGlobal + #undef cuMemGetInfo + #undef cuMemAlloc + #undef cuMemAllocPitch + #undef cuMemFree + #undef cuMemGetAddressRange + #undef cuMemAllocHost + #undef cuMemHostGetDevicePointer + #undef cuMemcpyHtoD + #undef cuMemcpyDtoH + #undef cuMemcpyDtoD + #undef cuMemcpyDtoA + #undef cuMemcpyAtoD + #undef cuMemcpyHtoA + #undef cuMemcpyAtoH + #undef cuMemcpyAtoA + #undef cuMemcpyHtoAAsync + #undef cuMemcpyAtoHAsync + #undef cuMemcpy2D + #undef cuMemcpy2DUnaligned + #undef cuMemcpy3D + #undef cuMemcpyHtoDAsync + #undef cuMemcpyDtoHAsync + #undef cuMemcpyDtoDAsync + #undef cuMemcpy2DAsync + #undef cuMemcpy3DAsync + #undef cuMemsetD8 + #undef cuMemsetD16 + #undef cuMemsetD32 + #undef cuMemsetD2D8 + #undef cuMemsetD2D16 + #undef cuMemsetD2D32 + #undef cuArrayCreate + #undef cuArrayGetDescriptor + #undef cuArray3DCreate + #undef cuArray3DGetDescriptor + #undef cuTexRefSetAddress + #undef cuTexRefSetAddress2D + #undef cuTexRefGetAddress + #undef cuGraphicsResourceGetMappedPointer + #undef cuCtxDestroy + #undef cuCtxPopCurrent + #undef cuCtxPushCurrent + #undef cuStreamDestroy + #undef cuEventDestroy + #undef cuMemcpy + #undef cuMemcpyAsync + #undef cuMemcpyPeer + #undef cuMemcpyPeerAsync + #undef cuMemcpy3DPeer + #undef cuMemcpy3DPeerAsync + #undef cuMemsetD8Async + #undef cuMemsetD16Async + #undef cuMemsetD32Async + #undef cuMemsetD2D8Async + #undef cuMemsetD2D16Async + #undef cuMemsetD2D32Async + #undef cuStreamGetPriority + #undef cuStreamGetId + #undef cuStreamGetFlags + #undef cuStreamGetCtx + #undef cuStreamWaitEvent + #undef cuStreamAddCallback + #undef cuStreamAttachMemAsync + #undef cuStreamQuery + #undef cuStreamSynchronize + #undef cuEventRecord + #undef cuEventRecordWithFlags + #undef cuLaunchKernel + #undef cuLaunchKernelEx + #undef cuLaunchHostFunc + #undef cuGraphicsMapResources + #undef cuGraphicsUnmapResources + #undef cuStreamWriteValue32 + #undef cuStreamWaitValue32 + #undef cuStreamWriteValue64 + #undef cuStreamWaitValue64 + #undef cuStreamBatchMemOp + #undef cuStreamWriteValue32_v2 + #undef cuStreamWaitValue32_v2 + #undef cuStreamWriteValue64_v2 + #undef cuStreamWaitValue64_v2 + #undef cuStreamBatchMemOp_v2 + #undef cuMemPrefetchAsync + #undef cuLaunchCooperativeKernel + #undef cuSignalExternalSemaphoresAsync + #undef cuWaitExternalSemaphoresAsync + #undef cuStreamBeginCapture + #undef cuStreamEndCapture + #undef cuStreamIsCapturing + #undef cuStreamGetCaptureInfo + #undef cuStreamGetCaptureInfo_v2 + #undef cuGraphInstantiateWithParams + #undef cuGraphExecUpdate + #undef cuGraphUpload + #undef cuGraphLaunch + #undef cuDevicePrimaryCtxRelease + #undef cuDevicePrimaryCtxReset + #undef cuDevicePrimaryCtxSetFlags + #undef cuIpcOpenMemHandle + #undef cuStreamCopyAttributes + #undef cuStreamSetAttribute + #undef cuStreamGetAttribute + #undef cuGraphInstantiate + #undef 
cuGraphAddKernelNode + #undef cuGraphKernelNodeGetParams + #undef cuGraphKernelNodeSetParams + #undef cuGraphExecKernelNodeSetParams + #undef cuMemMapArrayAsync + #undef cuMemFreeAsync + #undef cuMemAllocAsync + #undef cuMemAllocFromPoolAsync + #undef cuStreamUpdateCaptureDependencies + #undef cuGetProcAddress + + CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); + CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags); + CUresult CUDAAPI cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); + CUresult CUDAAPI cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, + unsigned int numOptions, CUjit_option *options, void **optionValues); + CUresult CUDAAPI cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, + unsigned int numOptions, CUjit_option *options, void **optionValues); + CUresult CUDAAPI cuTexRefSetAddress2D_v2(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); + + typedef unsigned int CUdeviceptr_v1; + + typedef struct CUDA_MEMCPY2D_v1_st + { + unsigned int srcXInBytes; /**< Source X in bytes */ + unsigned int srcY; /**< Source Y */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr_v1 srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ + + unsigned int dstXInBytes; /**< Destination X in bytes */ + unsigned int dstY; /**< Destination Y */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ + + unsigned int WidthInBytes; /**< Width of 2D memory copy in bytes */ + unsigned int Height; /**< Height of 2D memory copy */ + } CUDA_MEMCPY2D_v1; + + typedef struct CUDA_MEMCPY3D_v1_st + { + unsigned int srcXInBytes; /**< Source X in bytes */ + unsigned int srcY; /**< Source Y */ + unsigned int srcZ; /**< Source Z */ + unsigned int srcLOD; /**< Source LOD */ + CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ + const void *srcHost; /**< Source host pointer */ + CUdeviceptr_v1 srcDevice; /**< Source device pointer */ + CUarray srcArray; /**< Source array reference */ + void *reserved0; /**< Must be NULL */ + unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ + unsigned int srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ + + unsigned int dstXInBytes; /**< Destination X in bytes */ + unsigned int dstY; /**< Destination Y */ + unsigned int dstZ; /**< Destination Z */ + unsigned int dstLOD; /**< Destination LOD */ + CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ + void *dstHost; /**< Destination host pointer */ + CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ + CUarray dstArray; /**< Destination array reference */ + void *reserved1; /**< Must be NULL */ + unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ + unsigned int dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ + + unsigned int WidthInBytes; /**< Width 
of 3D memory copy in bytes */ + unsigned int Height; /**< Height of 3D memory copy */ + unsigned int Depth; /**< Depth of 3D memory copy */ + } CUDA_MEMCPY3D_v1; + + typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st + { + unsigned int Width; /**< Width of array */ + unsigned int Height; /**< Height of array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + } CUDA_ARRAY_DESCRIPTOR_v1; + + typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st + { + unsigned int Width; /**< Width of 3D array */ + unsigned int Height; /**< Height of 3D array */ + unsigned int Depth; /**< Depth of 3D array */ + + CUarray_format Format; /**< Array format */ + unsigned int NumChannels; /**< Channels per array element */ + unsigned int Flags; /**< Flags */ + } CUDA_ARRAY3D_DESCRIPTOR_v1; + + CUresult CUDAAPI cuDeviceTotalMem(unsigned int *bytes, CUdevice dev); + CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); + CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name); + CUresult CUDAAPI cuMemGetInfo(unsigned int *free, unsigned int *total); + CUresult CUDAAPI cuMemAlloc(CUdeviceptr_v1 *dptr, unsigned int bytesize); + CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes); + CUresult CUDAAPI cuMemFree(CUdeviceptr_v1 dptr); + CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr); + CUresult CUDAAPI cuMemAllocHost(void **pp, unsigned int bytesize); + CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags); + CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); + CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D_v1 *pCopy); + CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D_v1 *pCopy); + CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D_v1 *pCopy); + CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); + CUresult CUDAAPI 
cuMemcpy2DAsync(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemsetD8(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N); + CUresult CUDAAPI cuMemsetD16(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N); + CUresult CUDAAPI cuMemsetD32(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N); + CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height); + CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray); + CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray); + CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); + CUresult CUDAAPI cuTexRefSetAddress(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes); + CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch); + CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr_v1 *pdptr, CUtexref hTexRef); + CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource); + + CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); + CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); + CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); + CUresult CUDAAPI cuStreamDestroy(CUstream hStream); + CUresult CUDAAPI cuEventDestroy(CUevent hEvent); + CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); + CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); + CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); + + CUresult CUDAAPI cuMemcpyHtoD_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoH_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoD_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyDtoA_v2(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoD_v2(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyHtoA_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoH_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAtoA_v2(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); + CUresult CUDAAPI cuMemcpyHtoAAsync_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyAtoHAsync_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2D_v2(const CUDA_MEMCPY2D *pCopy); + CUresult CUDAAPI cuMemcpy2DUnaligned_v2(const CUDA_MEMCPY2D *pCopy); + CUresult CUDAAPI cuMemcpy3D_v2(const CUDA_MEMCPY3D *pCopy); + CUresult CUDAAPI 
cuMemcpyHtoDAsync_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoHAsync_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyDtoDAsync_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy2DAsync_v2(const CUDA_MEMCPY2D *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DAsync_v2(const CUDA_MEMCPY3D *pCopy, CUstream hStream); + CUresult CUDAAPI cuMemsetD8_v2(CUdeviceptr dstDevice, unsigned char uc, size_t N); + CUresult CUDAAPI cuMemsetD16_v2(CUdeviceptr dstDevice, unsigned short us, size_t N); + CUresult CUDAAPI cuMemsetD32_v2(CUdeviceptr dstDevice, unsigned int ui, size_t N); + CUresult CUDAAPI cuMemsetD2D8_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); + CUresult CUDAAPI cuMemsetD2D16_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); + CUresult CUDAAPI cuMemsetD2D32_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); + CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); + CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); + CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); + CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); + CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); + + CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); + CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); + + CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); + CUresult CUDAAPI cuStreamGetId(CUstream hStream, unsigned long long *streamId); + CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); + CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); + CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); + CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); + CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); + CUresult CUDAAPI cuStreamQuery(CUstream hStream); + CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); + CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); + CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); + CUresult CUDAAPI cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int 
gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); + CUresult CUDAAPI cuLaunchKernelEx(const CUlaunchConfig *config, CUfunction f, void **kernelParams, void **extra); + CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); + CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); + CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + + CUresult CUDAAPI cuStreamWriteValue32_ptsz(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32_ptsz(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64_ptsz(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64_ptsz(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp_ptsz(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + + CUresult CUDAAPI cuStreamWriteValue32_v2(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue32_v2(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWriteValue64_v2(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamWaitValue64_v2(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + CUresult CUDAAPI cuStreamBatchMemOp_v2(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); + CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); + CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); + CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream); + CUresult CUDAAPI cuStreamBeginCapture_ptsz(CUstream hStream); + CUresult CUDAAPI cuStreamBeginCapture_v2(CUstream hStream, CUstreamCaptureMode mode); + CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); + CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); + CUresult 
CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); + CUresult CUDAAPI cuStreamGetCaptureInfo_ptsz(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); + CUresult CUDAAPI cuStreamGetCaptureInfo_v2(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); + CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS_v1 *nodeParams); + CUresult CUDAAPI cuGraphInstantiateWithParams(CUgraphExec *phGraphExec, CUgraph hGraph, CUDA_GRAPH_INSTANTIATE_PARAMS *instantiateParams); + CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); + CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraph, CUstream hStream); + CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraph, CUstream hStream); + CUresult CUDAAPI cuStreamCopyAttributes(CUstream dstStream, CUstream srcStream); + CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue *value); + CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue *param); + + CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); + CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + CUresult CUDAAPI cuGraphInstantiate_v2(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + + CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); + + CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); + CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); + CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); + + CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); + CUresult CUDAAPI cuGetProcAddress(const char *symbol, void **pfn, int cudaVersion, cuuint64_t flags); + +#elif defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) +static inline CUresult cuGetProcAddress_v2_ptsz(const char *symbol, void **funcPtr, int driverVersion, cuuint64_t flags, CUdriverProcAddressQueryResult *symbolStatus) { + const int procAddressMask = (CU_GET_PROC_ADDRESS_LEGACY_STREAM| + CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM); + if ((flags & procAddressMask) == 0) { + flags |= CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM; + } + return cuGetProcAddress_v2(symbol, funcPtr, driverVersion, flags, symbolStatus); +} +#define cuGetProcAddress_v2 cuGetProcAddress_v2_ptsz +#endif + +#ifdef __cplusplus +} +#endif + +#if defined(__GNUC__) + #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) + #pragma GCC visibility pop + #endif +#endif + +#undef __CUDA_DEPRECATED + +#endif /* 
__cuda_cuda_h__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h new file mode 100644 index 0000000000000000000000000000000000000000..5562ef3f6afeb7fce4bad4cb8067b3cb1b9a690f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_awbarrier_primitives.h @@ -0,0 +1,109 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDA_AWBARRIER_PRIMITIVES_H_ +#define _CUDA_AWBARRIER_PRIMITIVES_H_ + +#include "cuda_awbarrier_helpers.h" + +#if !defined(_CUDA_AWBARRIER_SM_TARGET) +# error This file requires compute capability 7.0 or greater. 
+#endif
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER __host__
+uint32_t __mbarrier_maximum_count() {
+    return _CUDA_AWBARRIER_MAX_COUNT;
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void __mbarrier_init(__mbarrier_t* barrier, uint32_t expected_count) {
+    _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_init(barrier, expected_count);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+void __mbarrier_inval(__mbarrier_t* barrier) {
+    _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_inval(barrier);
+}
+
+/* Arrive on the barrier for the current phase without changing the expected count. */
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+__mbarrier_token_t __mbarrier_arrive(__mbarrier_t* barrier) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive(barrier);
+}
+
+/* Arrive on the barrier and drop the calling thread from the expected count of later phases. */
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+__mbarrier_token_t __mbarrier_arrive_and_drop(__mbarrier_t* barrier) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_arrive_drop(barrier);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_test_wait(__mbarrier_t* barrier, __mbarrier_token_t token) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait(barrier, token);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+uint32_t __mbarrier_token_pending_count(__mbarrier_token_t token) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_token_pending_count(token);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_test_wait_parity(__mbarrier_t* barrier, bool phase_parity) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_test_wait_parity(barrier, phase_parity);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_try_wait(__mbarrier_t* barrier, __mbarrier_token_t token, uint32_t max_sleep_nanosec) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait(barrier, token, max_sleep_nanosec);
+}
+
+_CUDA_AWBARRIER_STATIC_QUALIFIER
+bool __mbarrier_try_wait_parity(__mbarrier_t* barrier, bool phase_parity, uint32_t max_sleep_nanosec) {
+    return _CUDA_AWBARRIER_INTERNAL_NAMESPACE::awbarrier_try_wait_parity(barrier, phase_parity, max_sleep_nanosec);
+}
+
+#endif /* !_CUDA_AWBARRIER_PRIMITIVES_H_ */
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9212081df2c7ea8938cb1142a3b2ba5750b7329b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_fp8.hpp
@@ -0,0 +1,1546 @@
+/*
+ * Copyright 2022 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_FP8_HPP__) +#define __CUDA_FP8_HPP__ + +#if !defined(__CUDA_FP8_H__) +#error "Do not include this file directly. Instead, include cuda_fp8.h." +#endif + +/* C++ header for std::memcpy (used for type punning in host-side + * implementations). When compiling as a CUDA source file memcpy is provided + * implicitly. !defined(__CUDACC__) implies !defined(__CUDACC_RTC__). 
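+ * (In practice this typically resolves to <cstring> for C++ translation units
+ * and <string.h> otherwise; the corresponding include directives follow this
+ * comment.)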
+ */ +#if defined(__cplusplus) && !defined(__CUDACC__) +#include +#elif !defined(__cplusplus) && !defined(__CUDACC__) +#include +#endif /* defined(__cplusplus) && !defined(__CUDACC__) */ + +/* Set up structure-alignment attribute */ +#if !(defined __CUDA_ALIGN__) +#if defined(__CUDACC__) +#define __CUDA_ALIGN__(align) __align__(align) +#else +/* Define alignment macro based on compiler type (cannot assume C11 "_Alignas" + * is available) */ +#if __cplusplus >= 201103L +#define __CUDA_ALIGN__(n) \ + alignas(n) /* C++11 kindly gives us a keyword for this */ +#else /* !defined(__CPP_VERSION_AT_LEAST_11_FP8)*/ +#if defined(__GNUC__) +#define __CUDA_ALIGN__(n) __attribute__((aligned(n))) +#elif defined(_MSC_VER) +#define __CUDA_ALIGN__(n) __declspec(align(n)) +#else +#define __CUDA_ALIGN__(n) +#endif /* defined(__GNUC__) */ +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ +#endif /* defined(__CUDACC__) */ +#endif /* !(defined __CUDA_ALIGN__) */ + +#if !(defined __CPP_VERSION_AT_LEAST_11_FP8) +/* need c++11 for explicit operators */ +#define __CUDA_NO_FP8_CONVERSION_OPERATORS__ +#endif + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_double_to_fp8(const double x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + unsigned char res; + unsigned long long int xbits; + +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&xbits, &x, sizeof(x)); +#else + (void)std::memcpy(&xbits, &x, sizeof(x)); +#endif + unsigned char FP8_MAXNORM; + unsigned char FP8_MANTISSA_MASK; + unsigned short int FP8_EXP_BIAS; + unsigned long long int FP8_SIGNIFICAND_BITS; + const unsigned long long int DP_INF_BITS = 0x7FF0000000000000ULL; + unsigned long long int FP8_MINDENORM_O2; + unsigned long long int FP8_OVERFLOW_THRESHOLD; + unsigned long long int FP8_MINNORM; + + if (fp8_interpretation == __NV_E4M3) { + FP8_EXP_BIAS = 7U; + FP8_SIGNIFICAND_BITS = 4ULL; + FP8_MANTISSA_MASK = 0x7U; + FP8_MINDENORM_O2 = 0x3F50000000000000ULL; // mindenorm/2 = 2^-10 + FP8_OVERFLOW_THRESHOLD = + 0x407D000000000000ULL; // maxnorm + 1/2ulp = 0x1.Cp+8 + 0x1p+4 + FP8_MAXNORM = 0x7EU; + FP8_MINNORM = 0x3F90000000000000ULL; // minnorm = 2^-6 + } else { //__NV_E5M2 + FP8_EXP_BIAS = 15U; + FP8_SIGNIFICAND_BITS = 3ULL; + FP8_MANTISSA_MASK = 0x3U; + FP8_MINDENORM_O2 = 0x3EE0000000000000ULL; // mindenorm/2 = 2^-17 + FP8_OVERFLOW_THRESHOLD = + 0x40EE000000000000ULL - + 1ULL; // maxnorm + 1/2ulp = 0x1.Ep+15, and -1 to have common code + FP8_MAXNORM = 0x7BU; + FP8_MINNORM = 0x3F10000000000000ULL; // minnorm = 2^-14 + } + + // 1/2 LSB of the target format, positioned in double precision mantissa + // helpful in midpoints detection during round-to-nearest-even step + const unsigned long long int FP8_DP_HALF_ULP = + (unsigned long long int)1ULL << (53ULL - FP8_SIGNIFICAND_BITS - 1ULL); + // prepare sign bit in target format + unsigned char sign = (unsigned char)((xbits >> 63ULL) << 7U); + // prepare exponent field in target format + unsigned char exp = + (unsigned char)((((unsigned short int)(xbits >> 52ULL)) & 0x7FFU) - + 1023U + FP8_EXP_BIAS); + // round mantissa to target format width, rounding towards zero + unsigned char mantissa = + (unsigned char)(xbits >> (53ULL - FP8_SIGNIFICAND_BITS)) & + FP8_MANTISSA_MASK; + unsigned long long int absx = xbits & 0x7FFFFFFFFFFFFFFFULL; + + if (absx <= FP8_MINDENORM_O2) { + // zero or underflow + res = 0U; + } else if (absx > DP_INF_BITS) { + // NaN + if (fp8_interpretation == __NV_E4M3) { + res = 0x7FU; + } else { + // NaN --> QNaN + res = 0x7EU 
| mantissa; + } + } else if (absx > FP8_OVERFLOW_THRESHOLD) { + if (saturate == __NV_SATFINITE) { + res = FP8_MAXNORM; + } else { + // __NV_NOSAT + if (fp8_interpretation == __NV_E4M3) { + // no Inf in E4M3 + res = 0x7FU; // NaN + } else { + res = 0x7CU; // Inf in E5M2 + } + } + } else if (absx >= FP8_MINNORM) { + res = (unsigned char)((exp << (FP8_SIGNIFICAND_BITS - 1U)) | mantissa); + // rounded-off bits + unsigned long long int round = + xbits & ((FP8_DP_HALF_ULP << 1ULL) - 1ULL); + // round-to-nearest-even adjustment + if ((round > FP8_DP_HALF_ULP) || + ((round == FP8_DP_HALF_ULP) && (mantissa & 1U))) { + res = (unsigned char)(res + 1U); + } + } else // Denormal range + { + unsigned char shift = (unsigned char)(1U - exp); + // add implicit leading bit + mantissa |= (unsigned char)(1U << (FP8_SIGNIFICAND_BITS - 1U)); + // additional round-off due to denormalization + res = (unsigned char)(mantissa >> shift); + + // rounded-off bits, including implicit leading bit + unsigned long long int round = + (xbits | ((unsigned long long int)1ULL << (53ULL - 1ULL))) & + ((FP8_DP_HALF_ULP << (shift + 1ULL)) - 1ULL); + // round-to-nearest-even adjustment + if ((round > (FP8_DP_HALF_ULP << shift)) || + ((round == (FP8_DP_HALF_ULP << shift)) && (res & 1U))) { + res = (unsigned char)(res + 1U); + } + } + + res |= sign; + + return (__nv_fp8_storage_t)res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_double2_to_fp8x2(const double2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t storage = (__nv_fp8x2_storage_t)__nv_cvt_double_to_fp8( + x.y, saturate, fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + storage = (__nv_fp8x2_storage_t)(storage | + __nv_cvt_double_to_fp8( + x.x, saturate, fp8_interpretation)); + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_float_to_fp8(const float x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8_storage_t res = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + __nv_fp8x2_storage_t storage; + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x), "f"(0.0f)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x), "f"(0.0f)); + } + res = (__nv_fp8_storage_t)storage; + } else +#endif + { + unsigned int xbits; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&xbits, &x, sizeof(x)); +#else + (void)std::memcpy(&xbits, &x, sizeof(x)); +#endif + + // isnan + if ((xbits & 0x7FFFFFFFU) > 0x7F800000U) { + // Canonical NaN + xbits = 0x7FFFFFFFU; + } + + float fx; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&fx, &xbits, sizeof(xbits)); +#else + (void)std::memcpy(&fx, &xbits, sizeof(xbits)); +#endif + + const double dx = (double)fx; + res = __nv_cvt_double_to_fp8(dx, saturate, fp8_interpretation); + } + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_float2_to_fp8x2(const float2 x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t storage; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f32 %0, %2, %1;}\n" + : "=h"(storage) + : "f"(x.x), "f"(x.y)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f32 %0, 
%2, %1;}\n" + : "=h"(storage) + : "f"(x.x), "f"(x.y)); + } + } else +#endif + { + storage = (__nv_fp8x2_storage_t)__nv_cvt_float_to_fp8( + x.y, saturate, fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + storage = (__nv_fp8x2_storage_t)(storage | __nv_cvt_float_to_fp8( + x.x, saturate, + fp8_interpretation)); + } + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float +__internal_halfraw_to_float(const __half_raw x) { + float f; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) + asm("{cvt.f32.f16 %0, %1;}\n" : "=f"(f) : "h"(x.x)); +#else + const unsigned int ux = (unsigned int)x.x; + unsigned int sign = (ux >> 15U) & 1U; + unsigned int exponent = (ux >> 10U) & 0x1fU; + unsigned int mantissa = (ux & 0x3ffU) << 13U; + if (exponent == 0x1fU) { /* NaN or Inf */ + /* discard sign of a NaN */ + sign = ((mantissa != 0U) ? (sign >> 1U) : sign); + mantissa = ((mantissa != 0U) ? 0x7fffffU : 0U); + exponent = 0xffU; + } else if (exponent == 0U) { /* Denorm or Zero */ + if (mantissa != 0U) { + unsigned int msb; + exponent = 0x71U; + do { + msb = (mantissa & 0x400000U); + mantissa <<= 1U; /* normalize */ + --exponent; + } while (msb == 0U); + mantissa &= 0x7fffffU; /* 1.mantissa is implicit */ + } + } else { + exponent += 0x70U; + } + const unsigned int u = ((sign << 31U) | (exponent << 23U) | mantissa); +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&f, &u, sizeof(u)); +#else + (void)std::memcpy(&f, &u, sizeof(u)); +#endif +#endif /* (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 530) */ + return f; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float2 +__internal_halfraw2_to_float2(const __half2_raw x) { + __half_raw raw; + float2 res; + raw.x = x.x; + res.x = __internal_halfraw_to_float(raw); + raw.x = x.y; + res.y = __internal_halfraw_to_float(raw); + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t +__nv_cvt_halfraw_to_fp8(const __half_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8_storage_t res = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + unsigned int half2_storage = (unsigned int)(x.x); + __nv_fp8x2_storage_t tmp; + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } + res = (__nv_fp8_storage_t)tmp; + } else +#endif + { + float fx = __internal_halfraw_to_float(x); + res = __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation); + } + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t __nv_cvt_halfraw2_to_fp8x2( + const __half2_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_fp8x2_storage_t tmp; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + if (saturate == __NV_SATFINITE) { + unsigned int half2_storage; + (void)memcpy(&half2_storage, &x, sizeof(x)); + + if (fp8_interpretation == __NV_E5M2) { + asm("{cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } else { + asm("{cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;}\n" + : "=h"(tmp) + : "r"(half2_storage)); + } + } else +#endif + { + __half_raw raw; + raw.x = x.x; + __nv_fp8_storage_t lo = + __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation); + raw.x = x.y; + __nv_fp8_storage_t hi = + __nv_cvt_halfraw_to_fp8(raw, saturate, fp8_interpretation); + tmp = hi; + tmp = (__nv_fp8x2_storage_t)(tmp << 
8U); + tmp = (__nv_fp8x2_storage_t)(tmp | lo); + } + return tmp; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ float +__internal_bf16raw_to_float(const __nv_bfloat16_raw x) { + const unsigned int ux = ((unsigned int)x.x) << 16U; + float fx; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&fx, &ux, sizeof(ux)); +#else + (void)std::memcpy(&fx, &ux, sizeof(ux)); +#endif + return fx; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_bfloat16_raw +__internal_float_to_bf16raw_rz(const float x) { + unsigned int ux; + __nv_bfloat16_raw r; +#if defined(__CUDACC__) || (!defined __cplusplus) + (void)memcpy(&ux, &x, sizeof(x)); +#else + (void)std::memcpy(&ux, &x, sizeof(x)); +#endif + r.x = (unsigned short int)(ux >> 16U); + return r; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8_storage_t __nv_cvt_bfloat16raw_to_fp8( + const __nv_bfloat16_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + const float fx = __internal_bf16raw_to_float(x); + const __nv_fp8_storage_t res = + __nv_cvt_float_to_fp8(fx, saturate, fp8_interpretation); + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __nv_fp8x2_storage_t +__nv_cvt_bfloat16raw2_to_fp8x2( + const __nv_bfloat162_raw x, const __nv_saturation_t saturate, + const __nv_fp8_interpretation_t fp8_interpretation) { + __nv_bfloat16_raw raw; + raw.x = x.y; + __nv_fp8x2_storage_t storage = + (__nv_fp8x2_storage_t)__nv_cvt_bfloat16raw_to_fp8(raw, saturate, + fp8_interpretation); + storage = (__nv_fp8x2_storage_t)(storage << 8U); + raw.x = x.x; + storage = (__nv_fp8x2_storage_t)(storage | + __nv_cvt_bfloat16raw_to_fp8( + raw, saturate, fp8_interpretation)); + return storage; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation); +__CUDA_HOSTDEVICE_FP8_DECL__ __half_raw +__nv_cvt_fp8_to_halfraw(const __nv_fp8_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation) { + __half_raw res; + res.x = 0U; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + res.x = + __nv_cvt_fp8x2_to_halfraw2((__nv_fp8x2_storage_t)x, fp8_interpretation) + .x; +#else + unsigned short int ur = (unsigned short int)x; + ur = (unsigned short int)(ur << 8U); + + if (fp8_interpretation == __NV_E5M2) { + if ((ur & 0x7FFFU) > 0x7C00U) { + /* If NaN, return canonical NaN */ + ur = 0x7FFFU; + } + } else { // __NV_E4M3 + unsigned short int sign = ur & 0x8000U; + unsigned short int exponent = + (unsigned short int)(((ur & 0x7800U) >> 1U) + 0x2000U); + unsigned short int mantissa = (ur & 0x0700U) >> 1U; + unsigned char absx = 0x7FU & (unsigned char)x; + + if (absx == 0x7FU) // NaN + { + ur = 0x7FFFU; // fp16 canonical NaN, discard sign + } else if (exponent == 0x2000U) { + // zero or denormal + if (mantissa != 0U) { + // normalize + mantissa = (unsigned short int)(mantissa << 1U); + while ((mantissa & 0x0400U) == 0U) { + mantissa = (unsigned short int)(mantissa << 1U); + exponent = (unsigned short int)(exponent - 0x0400U); + } + // discard implicit leading bit + mantissa &= 0x03FFU; + } else { // Zero + exponent = 0U; + } + + ur = (sign | exponent) | mantissa; + } else { + ur = (sign | exponent) | mantissa; + } + } + res.x = ur; +#endif + return res; +} + +__CUDA_HOSTDEVICE_FP8_DECL__ __half2_raw +__nv_cvt_fp8x2_to_halfraw2(const __nv_fp8x2_storage_t x, + const __nv_fp8_interpretation_t fp8_interpretation) { + __half2_raw res; +#if (defined __CUDA_ARCH__) && (__CUDA_ARCH__ >= 890) + unsigned int half2_storage; + if (fp8_interpretation == __NV_E5M2) { + 
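+        // Hardware fast path on sm_89 and newer: a single cvt.rn.f16x2.e5m2x2
+        // (or .e4m3x2 in the else-branch) PTX instruction widens both packed
+        // fp8 values of the pair to a packed half2 in one step.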
asm("{cvt.rn.f16x2.e5m2x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x)); + } else { + asm("{cvt.rn.f16x2.e4m3x2 %0, %1;}\n" : "=r"(half2_storage) : "h"(x)); + } + (void)memcpy(&res, &half2_storage, sizeof(half2_storage)); +#else + res.x = + __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)x, fp8_interpretation).x; + res.y = __nv_cvt_fp8_to_halfraw((__nv_fp8_storage_t)(x >> 8U), + fp8_interpretation) + .x; +#endif + return res; +} + +/* All other definitions in this file are only visible to C++ compilers */ +#if defined(__cplusplus) + +/** + * \defgroup CUDA_MATH_FP8_E5M2_STRUCT C++ struct for handling fp8 data type of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * \brief __nv_fp8_e5m2 datatype + * + * \details This structure implements the datatype for handling + * \p fp8 floating-point numbers of \p e5m2 kind: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(1) __nv_fp8_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Storage variable contains the \p fp8 floating-point data. + */ + __nv_fp8_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider FP types */ + /* Note we do avoid constructor init-list because of special host/device + * compilation rules */ + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p __half data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __half f) { + __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const __nv_bfloat16 f) { + __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const float f) { + __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p double data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const double f) { + __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E5M2); + } + + /* Converts from integral */ + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p unsigned \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e5m2(const unsigned short int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p unsigned \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const unsigned int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p unsigned \p long \p long \p int data type, relies on + * \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e5m2(const unsigned long long int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p short \p int data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const short int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Constructor from \p long \p long \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e5m2(const long long int val) { + __x = static_cast<__nv_fp8_e5m2>(static_cast(val)).__x; + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening FP converts */ + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p __half data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const { + return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E5M2)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p float data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float() const { + return __internal_halfraw_to_float( + __nv_cvt_fp8_to_halfraw(__x, __NV_E5M2)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p __nv_bfloat16 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const { + return static_cast<__nv_bfloat16>( + __internal_float_to_bf16raw_rz(float(*this))); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p double data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator double() const { + return static_cast(float(*this)); + } + + /* Convert to integral */ + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p unsigned \p char data type. + * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const { + unsigned char i; + const float f = float(*this); + const unsigned char max_val = 0xFFU; + const unsigned char min_val = 0U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) > 0x7CU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p unsigned \p short \p int data type. + * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const { + return __half2ushort_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p unsigned \p int data type. 
+ * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const { + return __half2uint_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p unsigned \p long \p long \p int data type. + * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p 0x8000000000000000ULL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const { + return __half2ull_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p signed \p char data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const { + signed char i; + const float f = float(*this); + const signed char max_val = (signed char)0x7FU; + const signed char min_val = (signed char)0x80U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) > 0x7CU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p short \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const { + return __half2short_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator int() const { + return __half2int_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p long \p long \p int data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p 0x8000000000000000LL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const { + return __half2ll_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E5M2_STRUCT + * Conversion operator to \p bool data type. + * +0 and -0 inputs convert to \p false. + * Non-zero inputs convert to \p true. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const { + return (__x & 0x7FU) != 0U; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X2_E5M2_STRUCT C++ struct for handling vector type of two fp8 values of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * \brief __nv_fp8x2_e5m2 datatype + * + * \details This structure implements the datatype for handling two + * \p fp8 floating-point numbers of \p e5m2 kind each: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(2) __nv_fp8x2_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Storage variable contains the vector of two \p fp8 floating-point data + * values. + */ + __nv_fp8x2_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor by default. 
+ */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x2_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __half2 f) { + __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const __nv_bfloat162 f) { + __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f), + __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p float2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const float2 f) { + __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Constructor from \p double2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e5m2(const double2 f) { + __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E5M2); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Conversion operator to \p __half2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const { + return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2)); + } + /** + * \ingroup CUDA_MATH_FP8X2_E5M2_STRUCT + * Conversion operator to \p float2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const { + return __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E5M2)); + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +__CUDA_HOSTDEVICE_FP8_DECL__ unsigned int +__internal_pack_u16x2_to_u32(const unsigned short int src_lo, + const unsigned short int src_hi) { + unsigned int dst; +#if (defined __CUDACC__) && (defined __CUDA_ARCH__) + asm("{ mov.b32 %0, {%1,%2};}\n" : "=r"(dst) : "h"(src_lo), "h"(src_hi)); +#else + dst = (static_cast(src_hi) << 16U) | + static_cast(src_lo); +#endif + return dst; +} + +/** + * \defgroup CUDA_MATH_FP8X4_E5M2_STRUCT C++ struct for handling vector type of four fp8 values of e5m2 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * \brief __nv_fp8x4_e5m2 datatype + * + * \details This structure implements the datatype for handling four + * \p fp8 floating-point numbers of \p e5m2 kind each: + * with 1 sign, 5 exponent, 1 implicit and 2 explicit mantissa bits. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(4) __nv_fp8x4_e5m2 { + public: + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Storage variable contains the vector of four \p fp8 floating-point data + * values. + */ + __nv_fp8x4_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor by default. 
+ */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x4_e5m2() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from a pair of \p __half2 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __half2 flo, + const __half2 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from a pair of \p __nv_bfloat162 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const __nv_bfloat162 flo, + const __nv_bfloat162 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from \p float4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const float4 f) { + const float2 flo = {f.x, f.y}; + const float2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Constructor from \p double4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e5m2(const double4 f) { + const double2 flo = {f.x, f.y}; + const double2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E5M2); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E5M2); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + + /** + * \ingroup CUDA_MATH_FP8X4_E5M2_STRUCT + * Conversion operator to \p float4 vector data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const { + const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x); + const __nv_fp8x2_storage_t shi = + static_cast<__nv_fp8x2_storage_t>(__x >> 16U); + float2 rlo = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E5M2)); + float2 rhi = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E5M2)); + float4 res = {rlo.x, rlo.y, rhi.x, rhi.y}; + return res; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8_E4M3_STRUCT C++ struct for handling fp8 data type of e4m3 kind. 
+ * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * \brief __nv_fp8_e4m3 datatype + * + * \details This structure implements the datatype for storing + * \p fp8 floating-point numbers of \p e4m3 kind: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + * + * The structure implements converting constructors and operators. + */ +struct __CUDA_ALIGN__(1) __nv_fp8_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Storage variable contains the \p fp8 floating-point data. + */ + __nv_fp8_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider FP types */ + /* Note we do avoid constructor init-list because of special host/device + * compilation rules */ + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p __half data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __half f) { + __x = __nv_cvt_halfraw_to_fp8(static_cast<__half_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p __nv_bfloat16 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const __nv_bfloat16 f) { + __x = __nv_cvt_bfloat16raw_to_fp8(static_cast<__nv_bfloat16_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p float data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const float f) { + __x = __nv_cvt_float_to_fp8(f, __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p double data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const double f) { + __x = __nv_cvt_double_to_fp8(f, __NV_SATFINITE, __NV_E4M3); + } + + /* Converts from integral */ + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e4m3(const unsigned short int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const unsigned int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p unsigned \p long \p long \p int data type, relies on + * \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ + __nv_fp8_e4m3(const unsigned long long int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p short \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const short int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p int data type, relies on \p __NV_SATFINITE behavior + * for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Constructor from \p long \p long \p int data type, relies on \p + * __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8_e4m3(const long long int val) { + __x = static_cast<__nv_fp8_e4m3>(static_cast(val)).__x; + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening FP converts */ + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p __half data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half() const { + return static_cast<__half>(__nv_cvt_fp8_to_halfraw(__x, __NV_E4M3)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p float data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float() const { + return __internal_halfraw_to_float( + __nv_cvt_fp8_to_halfraw(__x, __NV_E4M3)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p __nv_bfloat16 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __nv_bfloat16() const { + return static_cast<__nv_bfloat16>( + __internal_float_to_bf16raw_rz(float(*this))); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p double data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator double() const { + return static_cast(float(*this)); + } + + /* Convert to integral */ + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p unsigned \p char data type. + * Clamps negative and too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned char() const { + unsigned char i; + const float f = float(*this); + const unsigned char max_val = 0xFFU; + const unsigned char min_val = 0U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) == 0x7FU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p unsigned \p short \p int data type. + * Clamps negative inputs to zero. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned short int() const { + return __half2ushort_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p unsigned \p int data type. + * Clamps negative inputs to zero. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned int() const { + return __half2uint_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p unsigned \p long \p long \p int data type. + * Clamps negative inputs to zero. + * \p NaN inputs convert to \p 0x8000000000000000ULL. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ operator unsigned long long int() const { + return __half2ull_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p signed \p char data type. + * Clamps too large inputs to the output range. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator signed char() const { + signed char i; + const float f = float(*this); + const signed char max_val = (signed char)0x7FU; + const signed char min_val = (signed char)0x80U; + const unsigned char bits = (*this).__x; + // saturation fixup + if ((bits & 0x7FU) == 0x7FU) { + // NaN + i = 0; + } else if (f > static_cast(max_val)) { + // saturate maximum + i = max_val; + } else if (f < static_cast(min_val)) { + // saturate minimum + i = min_val; + } else { + // normal value + i = static_cast(f); + } + return i; + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p short \p int data type. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator short int() const { + return __half2short_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p int data type. + * \p NaN inputs convert to \p zero. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator int() const { + return __half2int_rz(__half(*this)); + } + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p long \p long \p int data type. + * \p NaN inputs convert to \p 0x8000000000000000LL. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator long long int() const { + return __half2ll_rz(__half(*this)); + } + + /** + * \ingroup CUDA_MATH_FP8_E4M3_STRUCT + * Conversion operator to \p bool data type. + * +0 and -0 inputs convert to \p false. + * Non-zero inputs convert to \p true. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator bool() const { + return (__x & 0x7FU) != 0U; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X2_E4M3_STRUCT C++ struct for handling vector type of two fp8 values of e4m3 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * \brief __nv_fp8x2_e4m3 datatype + * + * \details This structure implements the datatype for storage + * and operations on the vector of two \p fp8 values of \p e4m3 kind each: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + */ +struct __CUDA_ALIGN__(2) __nv_fp8x2_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Storage variable contains the vector of two \p fp8 floating-point data + * values. + */ + __nv_fp8x2_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x2_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p __half2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __half2 f) { + __x = __nv_cvt_halfraw2_to_fp8x2(static_cast<__half2_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p __nv_bfloat162 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const __nv_bfloat162 f) { + __x = __nv_cvt_bfloat16raw2_to_fp8x2(static_cast<__nv_bfloat162_raw>(f), + __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p float2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const float2 f) { + __x = __nv_cvt_float2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Constructor from \p double2 data type, relies on \p __NV_SATFINITE + * behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x2_e4m3(const double2 f) { + __x = __nv_cvt_double2_to_fp8x2(f, __NV_SATFINITE, __NV_E4M3); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Conversion operator to \p __half2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator __half2() const { + return static_cast<__half2>(__nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3)); + } + /** + * \ingroup CUDA_MATH_FP8X2_E4M3_STRUCT + * Conversion operator to \p float2 data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float2() const { + return __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(__x, __NV_E4M3)); + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +/** + * \defgroup CUDA_MATH_FP8X4_E4M3_STRUCT C++ struct for handling vector type of four fp8 values of e4m3 kind. + * \ingroup CUDA_MATH_INTRINSIC_FP8 + */ + +/** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * \brief __nv_fp8x4_e4m3 datatype + * + * \details This structure implements the datatype for storage + * and operations on the vector of four \p fp8 values of \p e4m3 kind each: + * with 1 sign, 4 exponent, 1 implicit and 3 explicit mantissa bits. + * The encoding doesn't support Infinity. + * NaNs are limited to 0x7F and 0xFF values. + */ +struct __CUDA_ALIGN__(4) __nv_fp8x4_e4m3 { + public: + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Storage variable contains the vector of four \p fp8 floating-point data + * values. + */ + __nv_fp8x4_storage_t __x; + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor by default. + */ +#if defined(__CPP_VERSION_AT_LEAST_11_FP8) + __nv_fp8x4_e4m3() = default; +#else + __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3() {} +#endif /* defined(__CPP_VERSION_AT_LEAST_11_FP8) */ + +#if !defined(__CUDA_NO_FP8_CONVERSIONS__) + + /* Construct from wider types */ + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from a pair of \p __half2 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. 
+ */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __half2 flo, + const __half2 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(flo), __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = __nv_cvt_halfraw2_to_fp8x2( + static_cast<__half2_raw>(fhi), __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from a pair of \p __nv_bfloat162 data type values, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const __nv_bfloat162 flo, + const __nv_bfloat162 fhi) { + const __nv_fp8x2_storage_t rlo = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(flo), __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = __nv_cvt_bfloat16raw2_to_fp8x2( + static_cast<__nv_bfloat162_raw>(fhi), __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from \p float4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const float4 f) { + const float2 flo = {f.x, f.y}; + const float2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_float2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_float2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Constructor from \p double4 vector data type, + * relies on \p __NV_SATFINITE behavior for out-of-range values. + */ + explicit __CUDA_HOSTDEVICE_FP8__ __nv_fp8x4_e4m3(const double4 f) { + const double2 flo = {f.x, f.y}; + const double2 fhi = {f.z, f.w}; + const __nv_fp8x2_storage_t rlo = + __nv_cvt_double2_to_fp8x2(flo, __NV_SATFINITE, __NV_E4M3); + const __nv_fp8x2_storage_t rhi = + __nv_cvt_double2_to_fp8x2(fhi, __NV_SATFINITE, __NV_E4M3); + __x = __internal_pack_u16x2_to_u32(rlo, rhi); + } + +#if !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) + /* Widening converts */ + + /** + * \ingroup CUDA_MATH_FP8X4_E4M3_STRUCT + * Conversion operator to \p float4 vector data type. + */ + explicit __CUDA_HOSTDEVICE_FP8__ operator float4() const { + const __nv_fp8x2_storage_t slo = static_cast<__nv_fp8x2_storage_t>(__x); + const __nv_fp8x2_storage_t shi = + static_cast<__nv_fp8x2_storage_t>(__x >> 16U); + float2 rlo = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(slo, __NV_E4M3)); + float2 rhi = __internal_halfraw2_to_float2( + __nv_cvt_fp8x2_to_halfraw2(shi, __NV_E4M3)); + float4 res = {rlo.x, rlo.y, rhi.x, rhi.y}; + return res; + } +#endif /* !defined(__CUDA_NO_FP8_CONVERSION_OPERATORS__) */ +#endif /* !defined(__CUDA_NO_FP8_CONVERSIONS__) */ +}; + +#endif /* defined(__cplusplus) */ + +#endif /* end of include guard: __CUDA_FP8_HPP__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h new file mode 100644 index 0000000000000000000000000000000000000000..46bc89e4499576f1ae58848cd8684ba3e32420cf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline.h @@ -0,0 +1,224 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. 
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDA_PIPELINE_H_ +# define _CUDA_PIPELINE_H_ + +# include "cuda_pipeline_primitives.h" + +# if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER) +# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \ + -std=c++11 compiler option. +# endif + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# include "cuda_awbarrier.h" +# endif + +// Integration with libcu++'s cuda::barrier. 
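+// The forward declaration below places cuda::__block_scope_barrier_base in the
+// same inline ABI-version namespace that libcu++ uses (taken from
+// _LIBCUDACXX_CUDA_ABI_VERSION when libcu++ headers were included first,
+// otherwise an assumed default), so that pipeline::arrive_on() can accept a
+// cuda::barrier<cuda::thread_scope_block> without this header pulling in
+// libcu++ itself.
+//
+// A minimal device-side usage sketch of the pipeline API declared further
+// below (illustrative only; `gmem`, `staging`, and BLOCK are assumed names,
+// with one int4 element staged per thread):
+//
+//   __shared__ int4 staging[BLOCK];
+//   nvcuda::experimental::pipeline pipe;
+//   nvcuda::experimental::memcpy_async(staging[threadIdx.x], gmem[threadIdx.x], pipe);
+//   pipe.commit_and_wait();   // submit the batch and wait for it to complete
+//   __syncthreads();          // make the staged data visible block-wide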
+ +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# if defined(_LIBCUDACXX_CUDA_ABI_VERSION) +# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION +# else +# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4 +# endif + +# define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y +# define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y) +# define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION) + +namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE { + struct __block_scope_barrier_base; +}} + +# endif + +_CUDA_PIPELINE_BEGIN_NAMESPACE + +template +_CUDA_PIPELINE_QUALIFIER +auto segment(T* ptr) -> T(*)[N]; + +class pipeline { +public: + pipeline(const pipeline&) = delete; + pipeline(pipeline&&) = delete; + pipeline& operator=(const pipeline&) = delete; + pipeline& operator=(pipeline&&) = delete; + + _CUDA_PIPELINE_QUALIFIER pipeline(); + _CUDA_PIPELINE_QUALIFIER size_t commit(); + _CUDA_PIPELINE_QUALIFIER void commit_and_wait(); + _CUDA_PIPELINE_QUALIFIER void wait(size_t batch); + template + _CUDA_PIPELINE_QUALIFIER void wait_prior(); + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) + _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier); + _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier); +# endif + +private: + size_t current_batch; +}; + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T& dst, const T& src, pipeline& pipe); + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe); + +template +_CUDA_PIPELINE_QUALIFIER +auto segment(T* ptr) -> T(*)[N] +{ + return (T(*)[N])ptr; +} + +_CUDA_PIPELINE_QUALIFIER +pipeline::pipeline() + : current_batch(0) +{ +} + +_CUDA_PIPELINE_QUALIFIER +size_t pipeline::commit() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit(); + return this->current_batch++; +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::commit_and_wait() +{ + (void)pipeline::commit(); + pipeline::wait_prior<0>(); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::wait(size_t batch) +{ + const size_t prior = this->current_batch > batch ? 
this->current_batch - batch : 0; + + switch (prior) { + case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break; + case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break; + case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break; + case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break; + case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break; + case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break; + case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break; + case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break; + default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break; + } +} + +template +_CUDA_PIPELINE_QUALIFIER +void pipeline::wait_prior() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior(); +} + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +_CUDA_PIPELINE_QUALIFIER +void pipeline::arrive_on(awbarrier& barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier); +} + +_CUDA_PIPELINE_QUALIFIER +void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast(&barrier)); +} +# endif + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T& dst, const T& src, pipeline& pipe) +{ + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(&src) & (alignof(T) - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(&dst) & (alignof(T) - 1))); + + if (__is_trivially_copyable(T)) { + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed( + reinterpret_cast(&dst), reinterpret_cast(&src)); + } else { + dst = src; + } +} + +template +_CUDA_PIPELINE_QUALIFIER +void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe) +{ + constexpr size_t dst_size = sizeof(*dst); + constexpr size_t src_size = sizeof(*src); + static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size."); + static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size."); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src) & (dst_size - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst) & (dst_size - 1))); + + if (__is_trivially_copyable(T)) { + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict( + reinterpret_cast(*dst), reinterpret_cast(*src)); + } else { + for (size_t i = 0; i < DstN; ++i) { + (*dst)[i] = (i < SrcN) ? (*src)[i] : T(); + } + } +} + +_CUDA_PIPELINE_END_NAMESPACE + +#endif /* !_CUDA_PIPELINE_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..01882b6b976347b9bfdf276c3d0adcec1d8a55fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_helpers.h @@ -0,0 +1,373 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */
+
+#ifndef _CUDA_PIPELINE_HELPERS_H_
+# define _CUDA_PIPELINE_HELPERS_H_
+
+# define _CUDA_PIPELINE_NAMESPACE nvcuda::experimental
+# define _CUDA_PIPELINE_BEGIN_NAMESPACE namespace nvcuda { namespace experimental {
+# define _CUDA_PIPELINE_END_NAMESPACE } }
+
+# define _CUDA_PIPELINE_INTERNAL_NAMESPACE _CUDA_PIPELINE_NAMESPACE::__pipeline_internal
+# define _CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE _CUDA_PIPELINE_BEGIN_NAMESPACE namespace __pipeline_internal {
+# define _CUDA_PIPELINE_END_INTERNAL_NAMESPACE } _CUDA_PIPELINE_END_NAMESPACE
+
+# if !defined(_CUDA_PIPELINE_QUALIFIER)
+# define _CUDA_PIPELINE_QUALIFIER inline __device__
+# endif
+# if !defined(_CUDA_PIPELINE_STATIC_QUALIFIER)
+# define _CUDA_PIPELINE_STATIC_QUALIFIER static inline __device__
+# endif
+
+# if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
+# define _CUDA_PIPELINE_ARCH_700_OR_LATER
+# endif
+
+# if (__CUDA_ARCH__ >= 800)
+# define _CUDA_PIPELINE_HAS_ASYNC_COPY 1
+# else
+# define _CUDA_PIPELINE_HAS_ASYNC_COPY 0
+# endif
+
+# if !defined(_CUDA_PIPELINE_MAX_STAGES)
+# define _CUDA_PIPELINE_MAX_STAGES 8
+# endif
+
+# if defined(__cplusplus) && ((__cplusplus >= 201103L) || (defined(_MSC_VER) && (_MSC_VER >= 1900)))
+# define _CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER
+# endif
+
+# if !defined(_CUDA_PIPELINE_DEBUG)
+# if defined(__CUDACC_DEBUG__)
+# define _CUDA_PIPELINE_DEBUG 1
+# else
+# define _CUDA_PIPELINE_DEBUG 0
+# endif
+# endif
+
+# if defined(_CUDA_PIPELINE_DEBUG) && (_CUDA_PIPELINE_DEBUG == 1) && !defined(NDEBUG)
+# if !defined(__CUDACC_RTC__)
+# include <assert.h>
+# endif
+# define _CUDA_PIPELINE_ASSERT(x) assert((x));
+# define _CUDA_PIPELINE_ABORT() assert(0);
+# else
+# define _CUDA_PIPELINE_ASSERT(x)
+# define _CUDA_PIPELINE_ABORT() __trap();
+# endif
+
+# if defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
+# define _CUDA_PIPELINE_STATIC_ASSERT(c, m) static_assert(c, m)
+# else
+# define _CUDA_PIPELINE_STATIC_ASSERT(c, m)
+# endif
+
+# if (defined(_MSC_VER) && !defined(_WIN64)) || defined(__arm__)
+# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "r"
+# else
+# define _CUDA_PIPELINE_ASM_PTR_CONSTRAINT "l"
+# endif
+
+# if defined(__CUDACC_RTC__)
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+typedef uint64_t uintptr_t;
+# else
+# include <stdint.h>
+# endif
+
+_CUDA_PIPELINE_BEGIN_INTERNAL_NAMESPACE
+
+_CUDA_PIPELINE_STATIC_ASSERT(sizeof(short) == 2, "Size mismatch for type 'short'");
+_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int) == 4, "Size mismatch for type 'int'");
+_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int2) == 8, "Size mismatch for type 'int2'");
+_CUDA_PIPELINE_STATIC_ASSERT(sizeof(int4) == 16, "Size mismatch for type 'int4'");
+
+extern "C" __device__ uint32_t __nvvm_get_smem_pointer(void *);
+
+template<size_t CopySize, size_t SourceSize>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_memcpy_sync(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+    _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+    char* const d = reinterpret_cast<char*>(dst);
+    const char* const s = reinterpret_cast<const char*>(src);
+
+    size_t copy_step_size;
+    if (SourceSize == 0) {
+        copy_step_size = CopySize;
+    } else if (SourceSize == 2 || SourceSize == 4 || SourceSize == 8 || SourceSize == 16) {
+        copy_step_size = SourceSize;
+    } else {
+        copy_step_size = 1;
+    }
+
+    for (size_t i = 0; i < CopySize; i += copy_step_size) {
+        const bool copy_source = SourceSize && (i < SourceSize);
+
+        switch (copy_step_size) {
+        case 1:
+            d[i] = copy_source ? s[i] : char();
+            break;
+        case 2:
+            *reinterpret_cast<short*>(d + i) = copy_source ? *reinterpret_cast<const short*>(s + i) : short();
+            break;
+        case 4:
+            *reinterpret_cast<int*>(d + i) = copy_source ? *reinterpret_cast<const int*>(s + i) : int();
+            break;
+        case 8:
+            *reinterpret_cast<int2*>(d + i) = copy_source ? *reinterpret_cast<const int2*>(s + i) : int2();
+            break;
+        case 16:
+            *reinterpret_cast<int4*>(d + i) = copy_source ? *reinterpret_cast<const int4*>(s + i) : int4();
+            break;
+        }
+    }
+}
+
+template<bool UseAsyncCopy>
+struct ImplementationChooser;
+
+template<>
+struct ImplementationChooser<true> {
+    template<size_t CopySize, size_t SourceSize>
+    struct CpAsyncChooser {
+        _CUDA_PIPELINE_STATIC_QUALIFIER
+        void cp_async(void* __restrict__ dst, const void* __restrict__ src)
+        {
+            asm volatile ("cp.async.ca.shared.global [%0], [%1], %2, %3;"
+                :
+                : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(CopySize),
+                  "n"(SourceSize)
+                : "memory");
+        }
+    };
+
+    template<size_t SourceSize>
+    struct CpAsyncChooser<16, SourceSize> {
+        _CUDA_PIPELINE_STATIC_QUALIFIER
+        void cp_async(void* __restrict__ dst, const void* __restrict__ src)
+        {
+            asm volatile ("cp.async.cg.shared.global [%0], [%1], %2, %3;"
+                :
+                : "r"(__nvvm_get_smem_pointer(dst)), _CUDA_PIPELINE_ASM_PTR_CONSTRAINT(src), "n"(16), "n"(SourceSize)
+                : "memory");
+        }
+    };
+
+    template<size_t CopySize, size_t SourceSize>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+    {
+        _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+        _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+        _CUDA_PIPELINE_ASSERT(__isShared(dst));
+        _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+        CpAsyncChooser<CopySize, SourceSize>::cp_async(dst, src);
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_commit()
+    {
+        asm volatile ("cp.async.commit_group;");
+    }
+
+    template<unsigned N>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_wait_prior()
+    {
+        asm volatile ("cp.async.wait_group %0;"
+            :
+            : "n"(N < _CUDA_PIPELINE_MAX_STAGES ? N : _CUDA_PIPELINE_MAX_STAGES));
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_arrive_on(uint64_t* barrier)
+    {
+        _CUDA_PIPELINE_ASSERT(__isShared(barrier));
+
+        asm volatile ("cp.async.mbarrier.arrive.shared.b64 [%0];"
+            :
+            : "r"(__nvvm_get_smem_pointer(barrier)));
+    }
+};
+
+template<>
+struct ImplementationChooser<false> {
+    template<size_t CopySize, size_t SourceSize>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+    {
+        _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+        _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+        _CUDA_PIPELINE_ASSERT(__isShared(dst));
+        _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+        _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+        pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_commit()
+    {
+    }
+
+    template<unsigned N>
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_wait_prior()
+    {
+    }
+
+    _CUDA_PIPELINE_STATIC_QUALIFIER
+    void pipeline_arrive_on(uint64_t* barrier)
+    {
+    }
+};
+
+template<size_t CopySize, size_t SourceSize>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_memcpy_async(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+    _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size");
+    _CUDA_PIPELINE_ASSERT(__isShared(dst));
+    _CUDA_PIPELINE_ASSERT(__isGlobal(src));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_commit()
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_commit();
+}
+
+template<unsigned N>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_wait_prior()
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_wait_prior<N>();
+}
+
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_arrive_on(uint64_t* barrier)
+{
+    ImplementationChooser<_CUDA_PIPELINE_HAS_ASYNC_COPY>::pipeline_arrive_on(barrier);
+}
+
+template<size_t CopySize, size_t SourceSize>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_copy_strict(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_STATIC_ASSERT(CopySize == 4 || CopySize == 8 || CopySize == 16, "Unsupported copy size.");
+    _CUDA_PIPELINE_STATIC_ASSERT(SourceSize <= CopySize, "Source size must be less than or equal to copy size.");
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (CopySize - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (CopySize - 1)));
+
+    if (__isGlobal(src) && __isShared(dst)) {
+        pipeline_memcpy_async<CopySize, SourceSize>(dst, src);
+    } else {
+        pipeline_memcpy_sync<CopySize, SourceSize>(dst, src);
+    }
+}
+
+template<size_t CopySize, size_t Align>
+_CUDA_PIPELINE_QUALIFIER
+void pipeline_copy_relaxed(void* __restrict__ dst, const void* __restrict__ src)
+{
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (Align - 1)));
+    _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (Align - 1)));
+
+    const char* s = reinterpret_cast<const char*>(src);
+    char* d = reinterpret_cast<char*>(dst);
+    size_t remaining = CopySize;
+
+    while (remaining) {
+        if ((Align >= 16) && (remaining >= 16)) {
+            pipeline_copy_strict<16, 16>(d, s);
+            d += 16;
+            s += 16;
+            remaining -= 16;
+        } else if ((Align >= 8) && (remaining >= 8)) {
+            pipeline_copy_strict<8, 8>(d, s);
+            d += 8;
+            s += 8;
+            remaining -= 8;
+        } else if ((Align >= 4) && (remaining >= 4)) {
+            pipeline_copy_strict<4, 4>(d, s);
+            d += 4;
+            s += 4;
+            remaining -= 4;
+        } else if ((Align >= 2) && (remaining >= 2)) {
+            *reinterpret_cast<short*>(d) = *reinterpret_cast<const short*>(s);
+            d += 2;
+            s += 2;
+            remaining -= 2;
+        } else {
+            *d = *s;
+            d += 1;
+            s += 1;
+            remaining -= 1;
+        }
+    }
+}
+
+_CUDA_PIPELINE_END_INTERNAL_NAMESPACE
+
+#endif /* !_CUDA_PIPELINE_HELPERS_H_ */
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h
new file mode 100644
index 0000000000000000000000000000000000000000..eaba0cfb5ac9184bec5e837d2ec2f9db11d873ae
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_pipeline_primitives.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 1993-2019 NVIDIA Corporation. All rights reserved.
+ *
+ * NOTICE TO LICENSEE:
+ *
+ * This source code and/or documentation ("Licensed Deliverables") are
+ * subject to NVIDIA intellectual property rights under U.S. and
+ * international Copyright laws.
+ *
+ * These Licensed Deliverables contained herein is PROPRIETARY and
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
+ * conditions of a form of NVIDIA software license agreement by and
+ * between NVIDIA and Licensee ("License Agreement") or electronically
+ * accepted by Licensee. Notwithstanding any terms or conditions to
+ * the contrary in the License Agreement, reproduction or disclosure
+ * of the Licensed Deliverables to any third party without the express
+ * written consent of NVIDIA is prohibited.
+ *
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THESE LICENSED DELIVERABLES.
+ *
+ * U.S. Government End Users. These Licensed Deliverables are a
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
+ * 1995), consisting of "commercial computer software" and "commercial
+ * computer software documentation" as such terms are used in 48
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
+ * U.S. Government End Users acquire the Licensed Deliverables with
+ * only those rights set forth herein.
+ *
+ * Any use of the Licensed Deliverables in individual and commercial
+ * software must include, in the user documentation and internal
+ * comments to the code, the above Disclaimer and U.S. Government End
+ * Users Notice.
+ */ + +#ifndef _CUDA_PIPELINE_PRIMITIVES_H_ +# define _CUDA_PIPELINE_PRIMITIVES_H_ + +# include "cuda_pipeline_helpers.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_memcpy_async(void* __restrict__ dst_shared, const void* __restrict__ src_global, size_t size_and_align, + size_t zfill = 0) +{ + _CUDA_PIPELINE_ASSERT(size_and_align == 4 || size_and_align == 8 || size_and_align == 16); + _CUDA_PIPELINE_ASSERT(zfill <= size_and_align); + _CUDA_PIPELINE_ASSERT(__isShared(dst_shared)); + _CUDA_PIPELINE_ASSERT(__isGlobal(src_global)); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(dst_shared) & (size_and_align - 1))); + _CUDA_PIPELINE_ASSERT(!(reinterpret_cast(src_global) & (size_and_align - 1))); + + switch (size_and_align) { + case 16: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 16>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 15>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 14>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 13>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 12>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 11>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 10>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 9>(dst_shared, src_global); return; + case 8: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 8>(dst_shared, src_global); return; + case 9: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 7>(dst_shared, src_global); return; + case 10: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 6>(dst_shared, src_global); return; + case 11: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 5>(dst_shared, src_global); return; + case 12: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 4>(dst_shared, src_global); return; + case 13: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 3>(dst_shared, src_global); return; + case 14: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 2>(dst_shared, src_global); return; + case 15: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 1>(dst_shared, src_global); return; + case 16: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async<16, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 8: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 8>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 7>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 6>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 5>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 4>(dst_shared, src_global); return; + case 5: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 3>(dst_shared, src_global); return; + case 6: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 2>(dst_shared, src_global); return; + case 7: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 1>(dst_shared, src_global); return; + case 8: 
_CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 8, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + case 4: + switch (zfill) { + case 0: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 4>(dst_shared, src_global); return; + case 1: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 3>(dst_shared, src_global); return; + case 2: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 2>(dst_shared, src_global); return; + case 3: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 1>(dst_shared, src_global); return; + case 4: _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_memcpy_async< 4, 0>(dst_shared, src_global); return; + default: _CUDA_PIPELINE_ABORT(); return; + } + default: + _CUDA_PIPELINE_ABORT(); + return; + } +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_commit() +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit(); +} + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_wait_prior(size_t prior) +{ + switch (prior) { + case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); return; + case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); return; + case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); return; + case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); return; + case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); return; + case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); return; + case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); return; + case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); return; + default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); return; + } +} + +# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER) +# include "cuda_awbarrier_primitives.h" + +_CUDA_PIPELINE_STATIC_QUALIFIER +void __pipeline_arrive_on(__mbarrier_t* barrier) +{ + _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(barrier); +} +# endif + +#endif /* !_CUDA_PIPELINE_PRIMITIVES_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h new file mode 100644 index 0000000000000000000000000000000000000000..782bff6caaf2c5991f7f14d2de45863cdb3a3fad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cuda_runtime.h @@ -0,0 +1,2300 @@ +/* + * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__CUDA_RUNTIME_H__) +#define __CUDA_RUNTIME_H__ + +#if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__) +#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic push +#endif +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2))) +#pragma GCC diagnostic ignored "-Wunused-function" +#endif +#elif defined(_MSC_VER) +#pragma warning(push) +#pragma warning(disable: 4820) +#endif +#endif + +#ifdef __QNX__ +#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) +typedef unsigned size_t; +#endif +#endif +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "crt/host_config.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "builtin_types.h" +#include "library_types.h" +#if !defined(__CUDACC_RTC__) +#define EXCLUDE_FROM_RTC +#include "channel_descriptor.h" +#include "cuda_runtime_api.h" +#include "driver_functions.h" +#undef EXCLUDE_FROM_RTC +#endif /* !__CUDACC_RTC__ */ +#include "crt/host_defines.h" +#ifdef __CUDACC_RTC__ +#include "target" +#endif /* defined(__CUDACC_RTC__) */ + + +#include "vector_functions.h" + +#if defined(__CUDACC__) + +#if defined(__CUDACC_RTC__) +#include "nvrtc_device_runtime.h" +#include "crt/device_functions.h" +#include "crt/common_functions.h" +#include "device_launch_parameters.h" + +#else /* !__CUDACC_RTC__ */ +#define EXCLUDE_FROM_RTC +#include "crt/common_functions.h" +#include "crt/device_functions.h" +#include "device_launch_parameters.h" + +#if 
defined(__CUDACC_EXTENDED_LAMBDA__) +#include +#include +struct __device_builtin__ __nv_lambda_preheader_injection { }; +#endif /* defined(__CUDACC_EXTENDED_LAMBDA__) */ + +#undef EXCLUDE_FROM_RTC +#endif /* __CUDACC_RTC__ */ + +#endif /* __CUDACC__ */ + +/** \cond impl_private */ +#if defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) +#define __CUDA_DEPRECATED +#elif defined(_MSC_VER) +#define __CUDA_DEPRECATED __declspec(deprecated) +#elif defined(__GNUC__) +#define __CUDA_DEPRECATED __attribute__((deprecated)) +#else +#define __CUDA_DEPRECATED +#endif +/** \endcond impl_private */ + +#if defined(__cplusplus) && !defined(__CUDACC_RTC__) + +#if __cplusplus >= 201103 +#include +#endif + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +/** + * \addtogroup CUDART_HIGHLEVEL + * @{ + */ + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. + * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + + +#if __cplusplus >= 201103 || defined(__DOXYGEN_ONLY__) +/** + * \brief Launches a CUDA function with launch-time configuration + * + * Invokes the kernel \p func on \p config->gridDim (\p config->gridDim.x + * × \p config->gridDim.y × \p config->gridDim.z) grid of blocks. + * Each block contains \p config->blockDim (\p config->blockDim.x × + * \p config->blockDim.y × \p config->blockDim.z) threads. + * + * \p config->dynamicSmemBytes sets the amount of dynamic shared memory that + * will be available to each thread block. + * + * \p config->stream specifies a stream the invocation is associated to. 
+ * + * Configuration beyond grid and block dimensions, dynamic shared memory size, + * and stream can be provided with the following two fields of \p config: + * + * \p config->attrs is an array of \p config->numAttrs contiguous + * ::cudaLaunchAttribute elements. The value of this pointer is not considered + * if \p config->numAttrs is zero. However, in that case, it is recommended to + * set the pointer to NULL. + * \p config->numAttrs is the number of attributes populating the first + * \p config->numAttrs positions of the \p config->attrs array. + * + * The kernel arguments should be passed as arguments to this function via the + * \p args parameter pack. + * + * The C API version of this function, \p cudaLaunchKernelExC, is also available + * for pre-C++11 compilers and for use cases where the ability to pass kernel + * parameters via void* array is preferable. + * + * \param config - Launch configuration + * \param func - Kernel to launch + * \param args - Parameter pack of kernel parameters + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed, + * ::cudaErrorInvalidPtx, + * ::cudaErrorUnsupportedPtxVersion, + * ::cudaErrorNoKernelImageForDevice, + * ::cudaErrorJitCompilerNotFound, + * ::cudaErrorJitCompilationDisabled + * \note_null_stream + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * \ref ::cudaLaunchKernelExC(const cudaLaunchConfig_t *config, const void *func, void **args) "cudaLaunchKernelEx (C API)", + * ::cuLaunchKernelEx + */ +template +static __inline__ __host__ cudaError_t cudaLaunchKernelEx( + const cudaLaunchConfig_t *config, + void (*kernel)(ExpTypes...), + ActTypes &&... args +) +{ + return [&](ExpTypes... coercedArgs){ + void *pArgs[] = { &coercedArgs... }; + return ::cudaLaunchKernelExC(config, (const void *)kernel, pArgs); + }(std::forward(args)...); +} +#endif + +/** + *\brief Launches a device function + * + * The function invokes kernel \p func on \p gridDim (\p gridDim.x × \p gridDim.y + * × \p gridDim.z) grid of blocks. Each block contains \p blockDim (\p blockDim.x × + * \p blockDim.y × \p blockDim.z) threads. + * + * The device on which this kernel is invoked must have a non-zero value for + * the device attribute ::cudaDevAttrCooperativeLaunch. + * + * The total number of blocks launched cannot exceed the maximum number of blocks per + * multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors + * as specified by the device attribute ::cudaDevAttrMultiProcessorCount. + * + * The kernel cannot make use of CUDA dynamic parallelism. + * + * If the kernel has N parameters the \p args should point to array of N pointers. + * Each pointer, from args[0] to args[N - 1], point to the region + * of memory from which the actual parameter will be copied. + * + * \p sharedMem sets the amount of dynamic shared memory that will be available to + * each thread block. + * + * \p stream specifies a stream the invocation is associated to. 
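+ *
+ * As a purely illustrative sketch (the kernel, device buffers, and launch
+ * geometry below are assumptions, not part of this header), a cooperative
+ * launch through this wrapper might look like:
+ * \code
+ * __global__ void reduceKernel(const float* in, float* out, int n);  // assumed kernel using grid-wide sync
+ *
+ * void* kernelArgs[] = { &dIn, &dOut, &n };   // dIn, dOut, n assumed to exist
+ * dim3 grid(numBlocks), block(256);           // numBlocks assumed to respect the occupancy limit above
+ * cudaError_t err = cudaLaunchCooperativeKernel(reduceKernel, grid, block, kernelArgs);
+ * \endcode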
+ * + * \param func - Device function symbol + * \param gridDim - Grid dimentions + * \param blockDim - Block dimentions + * \param args - Arguments + * \param sharedMem - Shared memory (defaults to 0) + * \param stream - Stream identifier (defaults to NULL) + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidConfiguration, + * ::cudaErrorLaunchFailure, + * ::cudaErrorLaunchTimeout, + * ::cudaErrorLaunchOutOfResources, + * ::cudaErrorSharedObjectInitFailed + * \notefnerr + * \note_async + * \note_null_stream + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchCooperativeKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchCooperativeKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaLaunchCooperativeKernel( + const T *func, + dim3 gridDim, + dim3 blockDim, + void **args, + size_t sharedMem = 0, + cudaStream_t stream = 0 +) +{ + return ::cudaLaunchCooperativeKernel((const void *)func, gridDim, blockDim, args, sharedMem, stream); +} + +/** + * \brief \hl Creates an event object with the specified flags + * + * Creates an event object with the specified flags. Valid flags include: + * - ::cudaEventDefault: Default event creation flag. + * - ::cudaEventBlockingSync: Specifies that event should use blocking + * synchronization. A host thread that uses ::cudaEventSynchronize() to wait + * on an event created with this flag will block until the event actually + * completes. + * - ::cudaEventDisableTiming: Specifies that the created event does not need + * to record timing data. Events created with this flag specified and + * the ::cudaEventBlockingSync flag not specified will provide the best + * performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery(). + * + * \param event - Newly created event + * \param flags - Flags for new event + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorLaunchFailure, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaEventCreate(cudaEvent_t*) "cudaEventCreate (C API)", + * ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery, + * ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime, + * ::cudaStreamWaitEvent + */ +static __inline__ __host__ cudaError_t cudaEventCreate( + cudaEvent_t *event, + unsigned int flags +) +{ + return ::cudaEventCreateWithFlags(event, flags); +} + +/** + * \brief Creates an executable graph from a graph + * + * Instantiates \p graph as an executable graph. The graph is validated for any + * structural constraints or intra-node constraints which were not previously + * validated. If instantiation is successful, a handle to the instantiated graph + * is returned in \p pGraphExec. + * + * If there are any errors, diagnostic information may be returned in \p pErrorNode and + * \p pLogBuffer. This is the primary way to inspect instantiation errors. The output + * will be null terminated unless the diagnostics overflow + * the buffer. In this case, they will be truncated, and the last byte can be + * inspected to determine if truncation occurred. 
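+ *
+ * A minimal sketch of the intended use (the graph, stream, and log size here
+ * are assumptions for illustration only):
+ * \code
+ * cudaGraphExec_t graphExec;
+ * cudaGraphNode_t errorNode = NULL;
+ * char instantiateLog[256];
+ * cudaError_t err = cudaGraphInstantiate(&graphExec, graph, &errorNode,
+ *                                        instantiateLog, sizeof(instantiateLog));
+ * if (err == cudaSuccess) {
+ *     cudaGraphLaunch(graphExec, stream);   // 'stream' assumed
+ * }
+ * \endcode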
+ * + * \param pGraphExec - Returns instantiated graph + * \param graph - Graph to instantiate + * \param pErrorNode - In case of an instantiation error, this may be modified to + * indicate a node contributing to the error + * \param pLogBuffer - A character buffer to store diagnostic messages + * \param bufferSize - Size of the log buffer in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphInstantiateWithFlags, + * ::cudaGraphCreate, + * ::cudaGraphUpload, + * ::cudaGraphLaunch, + * ::cudaGraphExecDestroy + */ +static __inline__ __host__ cudaError_t cudaGraphInstantiate( + cudaGraphExec_t *pGraphExec, + cudaGraph_t graph, + cudaGraphNode_t *pErrorNode, + char *pLogBuffer, + size_t bufferSize +) +{ + (void)pErrorNode; + (void)pLogBuffer; + (void)bufferSize; + return ::cudaGraphInstantiate(pGraphExec, graph, 0); +} + +/** + * \brief \hl Allocates page-locked memory on the host + * + * Allocates \p size bytes of host memory that is page-locked and accessible + * to the device. The driver tracks the virtual memory ranges allocated with + * this function and automatically accelerates calls to functions such as + * ::cudaMemcpy(). Since the memory can be accessed directly by the device, it + * can be read or written with much higher bandwidth than pageable memory + * obtained with functions such as ::malloc(). Allocating excessive amounts of + * pinned memory may degrade system performance, since it reduces the amount + * of memory available to the system for paging. As a result, this function is + * best used sparingly to allocate staging areas for data exchange between host + * and device. + * + * The \p flags parameter enables different options to be specified that affect + * the allocation, as follows. + * - ::cudaHostAllocDefault: This flag's value is defined to be 0. + * - ::cudaHostAllocPortable: The memory returned by this call will be + * considered as pinned memory by all CUDA contexts, not just the one that + * performed the allocation. + * - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space. + * The device pointer to the memory may be obtained by calling + * ::cudaHostGetDevicePointer(). + * - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC). + * WC memory can be transferred across the PCI Express bus more quickly on some + * system configurations, but cannot be read efficiently by most CPUs. WC + * memory is a good option for buffers that will be written by the CPU and read + * by the device via mapped pinned memory or host->device transfers. + * + * All of these flags are orthogonal to one another: a developer may allocate + * memory that is portable, mapped and/or write-combined with no restrictions. + * + * ::cudaSetDeviceFlags() must have been called with the ::cudaDeviceMapHost + * flag in order for the ::cudaHostAllocMapped flag to have any effect. + * + * The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices + * that do not support mapped pinned memory. The failure is deferred to + * ::cudaHostGetDevicePointer() because the memory may be mapped into other + * CUDA contexts via the ::cudaHostAllocPortable flag. + * + * Memory allocated by this function must be freed with ::cudaFreeHost(). 
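+ *
+ * A minimal sketch of a pinned staging buffer (the byte count, device buffer,
+ * and stream are assumptions for illustration only):
+ * \code
+ * float* hStaging;
+ * cudaMallocHost((void**)&hStaging, bytes, cudaHostAllocDefault);
+ * // fill hStaging on the CPU, then overlap the copy with other work
+ * cudaMemcpyAsync(dBuffer, hStaging, bytes, cudaMemcpyHostToDevice, stream);
+ * cudaStreamSynchronize(stream);
+ * cudaFreeHost(hStaging);
+ * \endcode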
+ * + * \param ptr - Device pointer to allocated memory + * \param size - Requested allocation size in bytes + * \param flags - Requested properties of allocated memory + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaSetDeviceFlags, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc + */ +static __inline__ __host__ cudaError_t cudaMallocHost( + void **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc(ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostAlloc( + T **ptr, + size_t size, + unsigned int flags +) +{ + return ::cudaHostAlloc((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaHostGetDevicePointer( + T **pDevice, + void *pHost, + unsigned int flags +) +{ + return ::cudaHostGetDevicePointer((void**)(void*)pDevice, pHost, flags); +} + +/** + * \brief Allocates memory that will be automatically managed by the Unified Memory system + * + * Allocates \p size bytes of managed memory on the device and returns in + * \p *devPtr a pointer to the allocated memory. If the device doesn't support + * allocating managed memory, ::cudaErrorNotSupported is returned. Support + * for managed memory can be queried using the device attribute + * ::cudaDevAttrManagedMemory. The allocated memory is suitably + * aligned for any kind of variable. The memory is not cleared. If \p size + * is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. The pointer + * is valid on the CPU and on all GPUs in the system that support managed memory. + * All accesses to this pointer must obey the Unified Memory programming model. + * + * \p flags specifies the default stream association for this allocation. + * \p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The + * default value for \p flags is ::cudaMemAttachGlobal. + * If ::cudaMemAttachGlobal is specified, then this memory is accessible from + * any stream on any device. If ::cudaMemAttachHost is specified, then the + * allocation should not be accessed from devices that have a zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to + * ::cudaStreamAttachMemAsync will be required to enable access on such devices. + * + * If the association is later changed via ::cudaStreamAttachMemAsync to + * a single stream, the default association, as specifed during ::cudaMallocManaged, + * is restored when that stream is destroyed. For __managed__ variables, the + * default association is always ::cudaMemAttachGlobal. Note that destroying a + * stream is an asynchronous operation, and as a result, the change to default + * association won't happen until all work in the stream has completed. + * + * Memory allocated with ::cudaMallocManaged should be released with ::cudaFree. + * + * Device memory oversubscription is possible for GPUs that have a non-zero value for the + * device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on + * such GPUs may be evicted from device memory to host memory at any time by the Unified + * Memory driver in order to make room for other allocations. + * + * In a multi-GPU system where all GPUs have a non-zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this + * API returns and instead may be populated on access. 
In such systems, managed memory can + * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to + * maintain data locality and prevent excessive page faults to the extent possible. The application + * can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application + * can also explicitly migrate memory to a desired processor's memory via + * ::cudaMemPrefetchAsync. + * + * In a multi-GPU system where all of the GPUs have a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support + * with each other, the physical storage for managed memory is created on the GPU which is active + * at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced + * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate + * memory among such GPUs. + * + * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and + * where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess + * is zero for at least one of those GPUs, the location chosen for physical storage of managed + * memory is system-dependent. + * - On Linux, the location chosen will be device memory as long as the current set of active + * contexts are on devices that either have peer-to-peer support with each other or have a + * non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If there is an active context on a GPU that does not have a non-zero value for that device + * attribute and it does not have peer-to-peer support with the other devices that have active + * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. + * Note that this means that managed memory that is located in device memory is migrated to + * host memory if a new context is created on a GPU that doesn't have a non-zero value for + * the device attribute and does not support peer-to-peer with at least one of the other devices + * that has an active context. This in turn implies that context creation may fail if there is + * insufficient host memory to migrate all managed allocations. + * - On Windows, the physical storage is always created in 'zero-copy' or host memory. + * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these + * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to + * restrict CUDA to only use those GPUs that have peer-to-peer support. + * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero + * value to force the driver to always use device memory for physical storage. + * When this environment variable is set to a non-zero value, all devices used in + * that process that support managed memory have to be peer-to-peer compatible + * with each other. The error ::cudaErrorInvalidDevice will be returned if a device + * that supports managed memory is used and it is not peer-to-peer compatible with + * any of the other managed memory supporting devices that were previously used in + * that process, even if ::cudaDeviceReset has been called on those devices. These + * environment variables are described in the CUDA programming guide under the + * "CUDA environment variables" section. + * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. 
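+ *
+ * A minimal sketch (the kernel and element count are assumptions for
+ * illustration only):
+ * \code
+ * int n = 1 << 20;
+ * float* data;
+ * cudaMallocManaged(&data, n * sizeof(float));   // one pointer, valid on host and device
+ * initKernel<<<(n + 255) / 256, 256>>>(data, n); // assumed kernel that writes 'data'
+ * cudaDeviceSynchronize();                       // make the results visible to the host
+ * float first = data[0];                         // read back directly on the host
+ * cudaFree(data);
+ * \endcode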
+ * + * \param devPtr - Pointer to allocated device memory + * \param size - Requested allocation size in bytes + * \param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal) + * + * \return + * ::cudaSuccess, + * ::cudaErrorMemoryAllocation, + * ::cudaErrorNotSupported, + * ::cudaErrorInvalidValue + * \note_init_rt + * \note_callback + * + * \sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray, + * ::cudaMalloc3D, ::cudaMalloc3DArray, + * \ref ::cudaMallocHost(void**, size_t) "cudaMallocHost (C API)", + * ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync + */ +template +static __inline__ __host__ cudaError_t cudaMallocManaged( + T **devPtr, + size_t size, + unsigned int flags = cudaMemAttachGlobal +) +{ + return ::cudaMallocManaged((void**)(void*)devPtr, size, flags); +} + +/** + * \brief Attach memory to a stream asynchronously + * + * Enqueues an operation in \p stream to specify stream association of + * \p length bytes of memory starting from \p devPtr. This function is a + * stream-ordered operation, meaning that it is dependent on, and will + * only take effect when, previous work in stream has completed. Any + * previous association is automatically replaced. + * + * \p devPtr must point to an one of the following types of memories: + * - managed memory declared using the __managed__ keyword or allocated with + * ::cudaMallocManaged. + * - a valid host-accessible region of system-allocated pageable memory. This + * type of memory may only be specified if the device associated with the + * stream reports a non-zero value for the device attribute + * ::cudaDevAttrPageableMemoryAccess. + * + * For managed allocations, \p length must be either zero or the entire + * allocation's size. Both indicate that the entire allocation's stream + * association is being changed. Currently, it is not possible to change stream + * association for a portion of a managed allocation. + * + * For pageable allocations, \p length must be non-zero. + * + * The stream association is specified using \p flags which must be + * one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle. + * The default value for \p flags is ::cudaMemAttachSingle + * If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed + * by any stream on any device. + * If the ::cudaMemAttachHost flag is specified, the program makes a guarantee + * that it won't access the memory on the device from any stream on a device that + * has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess. + * If the ::cudaMemAttachSingle flag is specified and \p stream is associated with + * a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess, + * the program makes a guarantee that it will only access the memory on the device + * from \p stream. It is illegal to attach singly to the NULL stream, because the + * NULL stream is a virtual global stream and not a specific stream. An error will + * be returned in this case. + * + * When memory is associated with a single stream, the Unified Memory system will + * allow CPU access to this memory region so long as all operations in \p stream + * have completed, regardless of whether other streams are active. In effect, + * this constrains exclusive ownership of the managed memory region by + * an active GPU to per-stream activity instead of whole-GPU activity. 
+ * + * Accessing memory on the device from streams that are not associated with + * it will produce undefined results. No error checking is performed by the + * Unified Memory system to ensure that kernels launched into other streams + * do not access this region. + * + * It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync + * via events, synchronization or other means to ensure legal access to memory + * at all times. Data visibility and coherency will be changed appropriately + * for all kernels which follow a stream-association change. + * + * If \p stream is destroyed while data is associated with it, the association is + * removed and the association reverts to the default visibility of the allocation + * as specified at ::cudaMallocManaged. For __managed__ variables, the default + * association is always ::cudaMemAttachGlobal. Note that destroying a stream is an + * asynchronous operation, and as a result, the change to default association won't + * happen until all work in the stream has completed. + * + * \param stream - Stream in which to enqueue the attach operation + * \param devPtr - Pointer to memory (must be a pointer to managed memory or + * to a valid host-accessible region of system-allocated + * memory) + * \param length - Length of memory (defaults to zero) + * \param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle) + * + * \return + * ::cudaSuccess, + * ::cudaErrorNotReady, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidResourceHandle + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged + */ +template +static __inline__ __host__ cudaError_t cudaStreamAttachMemAsync( + cudaStream_t stream, + T *devPtr, + size_t length = 0, + unsigned int flags = cudaMemAttachSingle +) +{ + return ::cudaStreamAttachMemAsync(stream, (void*)devPtr, length, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMalloc( + T **devPtr, + size_t size +) +{ + return ::cudaMalloc((void**)(void*)devPtr, size); +} + +template +static __inline__ __host__ cudaError_t cudaMallocHost( + T **ptr, + size_t size, + unsigned int flags = 0 +) +{ + return cudaMallocHost((void**)(void*)ptr, size, flags); +} + +template +static __inline__ __host__ cudaError_t cudaMallocPitch( + T **devPtr, + size_t *pitch, + size_t width, + size_t height +) +{ + return ::cudaMallocPitch((void**)(void*)devPtr, pitch, width, height); +} + +/** + * \brief Allocate from a pool + * + * This is an alternate spelling for cudaMallocFromPoolAsync + * made available through operator overloading. 
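+ *
+ * A minimal sketch of allocating from the device's default pool (the byte
+ * count and stream are assumptions for illustration only):
+ * \code
+ * cudaMemPool_t pool;
+ * cudaDeviceGetDefaultMemPool(&pool, 0);
+ * float* dScratch;
+ * cudaMallocAsync((void**)&dScratch, bytes, pool, stream);
+ * // work submitted to 'stream' may use dScratch here
+ * cudaFreeAsync(dScratch, stream);
+ * \endcode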
+ * + * \sa ::cudaMallocFromPoolAsync, + * \ref ::cudaMallocAsync(void** ptr, size_t size, cudaStream_t hStream) "cudaMallocAsync (C API)" + */ +static __inline__ __host__ cudaError_t cudaMallocAsync( + void **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync(ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocAsync( + T **ptr, + size_t size, + cudaStream_t stream +) +{ + return ::cudaMallocAsync((void**)(void*)ptr, size, stream); +} + +template +static __inline__ __host__ cudaError_t cudaMallocFromPoolAsync( + T **ptr, + size_t size, + cudaMemPool_t memPool, + cudaStream_t stream +) +{ + return ::cudaMallocFromPoolAsync((void**)(void*)ptr, size, memPool, stream); +} + +#if defined(__CUDACC__) + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbol( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice +) +{ + return ::cudaMemcpyToSymbol((const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief \hl Copies data to the given symbol on the device + * + * Copies \p count bytes from the memory area pointed to by \p src + * to the memory area \p offset bytes from the start of symbol + * \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally + * be associated to a stream by passing a non-zero \p stream argument. If + * \p kind is ::cudaMemcpyHostToDevice and \p stream is non-zero, the copy + * may overlap with operations in other streams. 
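+ *
+ * A minimal sketch (the __constant__ symbol, host array, and stream are
+ * assumptions for illustration only):
+ * \code
+ * __constant__ float filterCoeffs[16];
+ *
+ * float hostCoeffs[16];   // filled by the host elsewhere
+ * cudaMemcpyToSymbolAsync(filterCoeffs, hostCoeffs, sizeof(hostCoeffs), 0,
+ *                         cudaMemcpyHostToDevice, stream);
+ * \endcode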
+ * + * \param symbol - Device symbol reference + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyToSymbolAsync( + const T &symbol, + const void *src, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyHostToDevice, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyToSymbolAsync((const void*)&symbol, src, count, offset, kind, stream); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_sync + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbol( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost +) +{ + return ::cudaMemcpyFromSymbol(dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief \hl Copies data from the given symbol on the device + * + * Copies \p count bytes from the memory area \p offset bytes + * from the start of symbol \p symbol to the memory area pointed to by \p dst. + * The memory areas may not overlap. \p symbol is a variable that resides in + * global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost or ::cudaMemcpyDeviceToDevice. + * + * ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so + * the call may return before the copy is complete. The copy can optionally be + * associated to a stream by passing a non-zero \p stream argument. If \p kind + * is ::cudaMemcpyDeviceToHost and \p stream is non-zero, the copy may overlap + * with operations in other streams. 
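+ *
+ * A minimal sketch (the __device__ symbol and stream are assumptions for
+ * illustration only):
+ * \code
+ * __device__ int errorCount;
+ *
+ * int hostErrorCount = 0;
+ * cudaMemcpyFromSymbolAsync(&hostErrorCount, errorCount, sizeof(int), 0,
+ *                           cudaMemcpyDeviceToHost, stream);
+ * cudaStreamSynchronize(stream);   // wait before reading hostErrorCount
+ * \endcode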
+ * + * \param dst - Destination memory address + * \param symbol - Device symbol reference + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * \param stream - Stream identifier + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorInvalidMemcpyDirection, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_async + * \note_string_api_deprecation + * \note_init_rt + * \note_callback + * + * \sa ::cudaMemcpy, ::cudaMemcpy2D, + * ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray, + * ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol, + * ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync, + * ::cudaMemcpy2DToArrayAsync, + * ::cudaMemcpy2DFromArrayAsync, + * ::cudaMemcpyToSymbolAsync + */ +template +static __inline__ __host__ cudaError_t cudaMemcpyFromSymbolAsync( + void *dst, + const T &symbol, + size_t count, + size_t offset = 0, + enum cudaMemcpyKind kind = cudaMemcpyDeviceToHost, + cudaStream_t stream = 0 +) +{ + return ::cudaMemcpyFromSymbolAsync(dst, (const void*)&symbol, count, offset, kind, stream); +} + +/** + * \brief Creates a memcpy node to copy to a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy to \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
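+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of adding a root memcpy-to-symbol node to a newly created graph; the
+ * __constant__ variable devFlag and the surrounding function are placeholders,
+ * and error checking is omitted:
+ * \code
+ * __constant__ int devFlag;
+ *
+ * void buildGraph(cudaGraph_t *graphOut, const int *hostFlag)
+ * {
+ *     cudaGraphNode_t node;
+ *     cudaGraphCreate(graphOut, 0);
+ *     // With no dependencies the node is placed at the root of the graph.
+ *     cudaGraphAddMemcpyNodeToSymbol(&node, *graphOut, NULL, 0,
+ *                                    devFlag, hostFlag, sizeof(int), 0,
+ *                                    cudaMemcpyHostToDevice);
+ * }
+ * \endcode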
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeToSymbol( + cudaGraphNode_t *pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t *pDependencies, + size_t numDependencies, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeToSymbol(pGraphNode, graph, pDependencies, numDependencies, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Creates a memcpy node to copy from a symbol on the device and adds it to a graph + * + * Creates a new memcpy node to copy from \p symbol and adds it to \p graph with + * \p numDependencies dependencies specified via \p pDependencies. + * It is possible for \p numDependencies to be 0, in which case the node will be placed + * at the root of the graph. \p pDependencies may not have any duplicate entries. + * A handle to the new node will be returned in \p pGraphNode. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * Memcpy nodes have some additional restrictions with regards to managed memory, if the + * system contains at least one device which has a zero value for the device attribute + * ::cudaDevAttrConcurrentManagedAccess. 
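+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of adding a from-symbol copy that depends on an existing kernel node; the
+ * __device__ variable devResult, the kernel node and the surrounding function
+ * are placeholders, and error checking is omitted:
+ * \code
+ * __device__ float devResult;
+ *
+ * void addReadbackNode(cudaGraph_t graph, cudaGraphNode_t kernelNode, float *hostResult)
+ * {
+ *     cudaGraphNode_t copyNode;
+ *     // Copy devResult back to the host once kernelNode has executed.
+ *     cudaGraphAddMemcpyNodeFromSymbol(&copyNode, graph, &kernelNode, 1,
+ *                                      hostResult, devResult, sizeof(float), 0,
+ *                                      cudaMemcpyDeviceToHost);
+ * }
+ * \endcode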
+ * + * \param pGraphNode - Returns newly created node + * \param graph - Graph to which to add the node + * \param pDependencies - Dependencies of the node + * \param numDependencies - Number of dependencies + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeToSymbol, + * ::cudaGraphMemcpyNodeGetParams, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphCreate, + * ::cudaGraphDestroyNode, + * ::cudaGraphAddChildGraphNode, + * ::cudaGraphAddEmptyNode, + * ::cudaGraphAddKernelNode, + * ::cudaGraphAddHostNode, + * ::cudaGraphAddMemsetNode + */ +template +static __inline__ __host__ cudaError_t cudaGraphAddMemcpyNodeFromSymbol( + cudaGraphNode_t* pGraphNode, + cudaGraph_t graph, + const cudaGraphNode_t* pDependencies, + size_t numDependencies, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphAddMemcpyNodeFromSymbol(pGraphNode, graph, pDependencies, numDependencies, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy to a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. + * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p src to the memory area pointed to by \p offset bytes from the start + * of symbol \p symbol. The memory areas may not overlap. \p symbol is a variable that + * resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of + * transfer is inferred from the pointer values. However, ::cudaMemcpyDefault + * is only allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param symbol - Device symbol address + * \param src - Source memory address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyToSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsToSymbol( + cudaGraphNode_t node, + const T &symbol, + const void* src, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsToSymbol(node, (const void*)&symbol, src, count, offset, kind); +} + +/** + * \brief Sets a memcpy node's parameters to copy from a symbol on the device + * + * Sets the parameters of memcpy node \p node to the copy described by the provided parameters. 
+ * + * When the graph is launched, the node will copy \p count bytes from the memory area + * pointed to by \p offset bytes from the start of symbol \p symbol to the memory area + * pointed to by \p dst. The memory areas may not overlap. \p symbol is a variable + * that resides in global or constant memory space. \p kind can be either + * ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. + * Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer + * is inferred from the pointer values. However, ::cudaMemcpyDefault is only + * allowed on systems that support unified virtual addressing. + * + * \param node - Node to set the parameters for + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaMemcpyFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsToSymbol, + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphMemcpyNodeGetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphMemcpyNodeSetParamsFromSymbol( + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphMemcpyNodeSetParamsFromSymbol(node, dst, (const void*)&symbol, count, offset, kind); +} + +/** + * \brief Sets the parameters for a memcpy node in the given graphExec to copy to a symbol on the device + * + * Updates the work represented by \p node in \p hGraphExec as though \p node had + * contained the given params at instantiation. \p node must remain in the graph which was + * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored. + * + * \p src and \p symbol must be allocated from the same contexts as the original source and + * destination memory. The instantiation-time memory operands must be 1-dimensional. + * Zero-length operations are not supported. + * + * The modifications only affect future launches of \p hGraphExec. Already enqueued + * or running launches of \p hGraphExec are not affected by this call. \p node is also + * not modified by this call. + * + * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or + * the original memory operands are multidimensional. 
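+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of retargeting the source of an instantiated to-symbol memcpy node between
+ * launches; the __constant__ variable devFlag, the node handle and the
+ * surrounding function are placeholders, and error checking is omitted:
+ * \code
+ * __constant__ int devFlag;
+ *
+ * void updateSource(cudaGraphExec_t graphExec, cudaGraphNode_t copyNode,
+ *                   const int *newHostFlag)
+ * {
+ *     // Only future launches of graphExec observe the new source pointer.
+ *     cudaGraphExecMemcpyNodeSetParamsToSymbol(graphExec, copyNode,
+ *                                              devFlag, newHostFlag, sizeof(int), 0,
+ *                                              cudaMemcpyHostToDevice);
+ * }
+ * \endcode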
+ *
+ * \param hGraphExec - The executable graph in which to set the specified node
+ * \param node - Memcpy node from the graph which was used to instantiate graphExec
+ * \param symbol - Device symbol address
+ * \param src - Source memory address
+ * \param count - Size in bytes to copy
+ * \param offset - Offset from start of symbol in bytes
+ * \param kind - Type of transfer
+ *
+ * \return
+ * ::cudaSuccess,
+ * ::cudaErrorInvalidValue
+ * \note_graph_thread_safety
+ * \notefnerr
+ * \note_init_rt
+ * \note_callback
+ *
+ * \sa
+ * ::cudaGraphAddMemcpyNode,
+ * ::cudaGraphAddMemcpyNodeToSymbol,
+ * ::cudaGraphMemcpyNodeSetParams,
+ * ::cudaGraphMemcpyNodeSetParamsToSymbol,
+ * ::cudaGraphInstantiate,
+ * ::cudaGraphExecMemcpyNodeSetParams,
+ * ::cudaGraphExecMemcpyNodeSetParamsFromSymbol,
+ * ::cudaGraphExecKernelNodeSetParams,
+ * ::cudaGraphExecMemsetNodeSetParams,
+ * ::cudaGraphExecHostNodeSetParams
+ */
+template<class T>
+static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsToSymbol(
+    cudaGraphExec_t hGraphExec,
+    cudaGraphNode_t node,
+    const T &symbol,
+    const void* src,
+    size_t count,
+    size_t offset,
+    enum cudaMemcpyKind kind)
+{
+    return ::cudaGraphExecMemcpyNodeSetParamsToSymbol(hGraphExec, node, (const void*)&symbol, src, count, offset, kind);
+}
+
+/**
+ * \brief Sets the parameters for a memcpy node in the given graphExec to copy from a symbol on the device
+ *
+ * Updates the work represented by \p node in \p hGraphExec as though \p node had
+ * contained the given params at instantiation. \p node must remain in the graph which was
+ * used to instantiate \p hGraphExec. Changed edges to and from \p node are ignored.
+ *
+ * \p symbol and \p dst must be allocated from the same contexts as the original source and
+ * destination memory. The instantiation-time memory operands must be 1-dimensional.
+ * Zero-length operations are not supported.
+ *
+ * The modifications only affect future launches of \p hGraphExec. Already enqueued
+ * or running launches of \p hGraphExec are not affected by this call. \p node is also
+ * not modified by this call.
+ *
+ * Returns ::cudaErrorInvalidValue if the memory operands' mappings changed or
+ * the original memory operands are multidimensional.
+ * + * \param hGraphExec - The executable graph in which to set the specified node + * \param node - Memcpy node from the graph which was used to instantiate graphExec + * \param dst - Destination memory address + * \param symbol - Device symbol address + * \param count - Size in bytes to copy + * \param offset - Offset from start of symbol in bytes + * \param kind - Type of transfer + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * \note_graph_thread_safety + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaGraphAddMemcpyNode, + * ::cudaGraphAddMemcpyNodeFromSymbol, + * ::cudaGraphMemcpyNodeSetParams, + * ::cudaGraphMemcpyNodeSetParamsFromSymbol, + * ::cudaGraphInstantiate, + * ::cudaGraphExecMemcpyNodeSetParams, + * ::cudaGraphExecMemcpyNodeSetParamsToSymbol, + * ::cudaGraphExecKernelNodeSetParams, + * ::cudaGraphExecMemsetNodeSetParams, + * ::cudaGraphExecHostNodeSetParams + */ +template +static __inline__ __host__ cudaError_t cudaGraphExecMemcpyNodeSetParamsFromSymbol( + cudaGraphExec_t hGraphExec, + cudaGraphNode_t node, + void* dst, + const T &symbol, + size_t count, + size_t offset, + enum cudaMemcpyKind kind) +{ + return ::cudaGraphExecMemcpyNodeSetParamsFromSymbol(hGraphExec, node, dst, (const void*)&symbol, count, offset, kind); +} + +// convenience function to avoid source breakage in c++ code +static __inline__ __host__ cudaError_t CUDARTAPI cudaGraphExecUpdate(cudaGraphExec_t hGraphExec, cudaGraph_t hGraph, cudaGraphNode_t *hErrorNode_out, enum cudaGraphExecUpdateResult *updateResult_out) +{ + cudaGraphExecUpdateResultInfo resultInfo; + cudaError_t status = cudaGraphExecUpdate(hGraphExec, hGraph, &resultInfo); + if (hErrorNode_out) { + *hErrorNode_out = resultInfo.errorNode; + } + if (updateResult_out) { + *updateResult_out = resultInfo.result; + } + return status; +} + +#if __cplusplus >= 201103 + +/** + * \brief Creates a user object by wrapping a C++ object + * + * TODO detail + * + * \param object_out - Location to return the user object handle + * \param objectToWrap - This becomes the \ptr argument to ::cudaUserObjectCreate. A + * lambda will be passed for the \p destroy argument, which calls + * delete on this object pointer. + * \param initialRefcount - The initial refcount to create the object with, typically 1. The + * initial references are owned by the calling thread. + * \param flags - Currently it is required to pass cudaUserObjectNoDestructorSync, + * which is the only defined flag. This indicates that the destroy + * callback cannot be waited on by any CUDA API. Users requiring + * synchronization of the callback should signal its completion + * manually. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidValue + * + * \sa + * ::cudaUserObjectCreate + */ +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + unsigned int flags) +{ + return ::cudaUserObjectCreate( + object_out, + objectToWrap, + [](void *vpObj) { delete reinterpret_cast(vpObj); }, + initialRefcount, + flags); +} + +template +static __inline__ __host__ cudaError_t cudaUserObjectCreate( + cudaUserObject_t *object_out, + T *objectToWrap, + unsigned int initialRefcount, + cudaUserObjectFlags flags) +{ + return cudaUserObjectCreate(object_out, objectToWrap, initialRefcount, (unsigned int)flags); +} + +#endif + +/** + * \brief \hl Finds the address associated with a CUDA symbol + * + * Returns in \p *devPtr the address of symbol \p symbol on the device. 
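+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * combining this function with ::cudaGetSymbolSize to clear a device symbol;
+ * the __device__ array devBuffer and the surrounding function are
+ * placeholders, and error checking is omitted:
+ * \code
+ * __device__ float devBuffer[256];
+ *
+ * void clearBuffer(void)
+ * {
+ *     void  *ptr  = NULL;
+ *     size_t size = 0;
+ *     cudaGetSymbolAddress(&ptr, devBuffer);
+ *     cudaGetSymbolSize(&size, devBuffer);
+ *     cudaMemset(ptr, 0, size);
+ * }
+ * \endcode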
+ * \p symbol can either be a variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in the global or constant memory space, \p *devPtr is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param devPtr - Return device pointer associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const void*) "cudaGetSymbolAddress (C API)", + * \ref ::cudaGetSymbolSize(size_t*, const T&) "cudaGetSymbolSize (C++ API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolAddress( + void **devPtr, + const T &symbol +) +{ + return ::cudaGetSymbolAddress(devPtr, (const void*)&symbol); +} + +/** + * \brief \hl Finds the size of the object associated with a CUDA symbol + * + * Returns in \p *size the size of symbol \p symbol. \p symbol must be a + * variable that resides in global or constant memory space. + * If \p symbol cannot be found, or if \p symbol is not declared + * in global or constant memory space, \p *size is unchanged and the error + * ::cudaErrorInvalidSymbol is returned. + * + * \param size - Size of object associated with symbol + * \param symbol - Device symbol reference + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidSymbol, + * ::cudaErrorNoKernelImageForDevice + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa \ref ::cudaGetSymbolAddress(void**, const T&) "cudaGetSymbolAddress (C++ API)", + * \ref ::cudaGetSymbolSize(size_t*, const void*) "cudaGetSymbolSize (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetSymbolSize( + size_t *size, + const T &symbol +) +{ + return ::cudaGetSymbolSize(size, (const void*)&symbol); +} + +/** + * \brief \hl Sets the preferred cache configuration for a device function + * + * On devices where the L1 cache and shared memory use the same hardware + * resources, this sets through \p cacheConfig the preferred cache configuration + * for the function specified via \p func. This is only a preference. The + * runtime will use the requested configuration if possible, but it is free to + * choose a different configuration if required to execute \p func. + * + * \p func must be a pointer to a function that executes on the device. + * The parameter specified by \p func must be declared as a \p __global__ + * function. If the specified function does not exist, + * then ::cudaErrorInvalidDeviceFunction is returned. + * + * This setting does nothing on devices where the size of the L1 cache and + * shared memory are fixed. + * + * Launching a kernel with a different preference than the most recent + * preference setting may insert a device-side synchronization point. 
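+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of requesting a larger shared-memory carveout for a kernel via the C++
+ * overload; the __global__ function myKernel is a placeholder, and error
+ * checking is omitted:
+ * \code
+ * __global__ void myKernel(float *data);
+ *
+ * void configureKernel(void)
+ * {
+ *     // A preference only: the runtime may still pick another configuration.
+ *     cudaFuncSetCacheConfig(myKernel, cudaFuncCachePreferShared);
+ * }
+ * \endcode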
+ * + * The supported cache configurations are: + * - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default) + * - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache + * - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory + * + * \param func - device function pointer + * \param cacheConfig - Requested cache configuration + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) "cudaFuncGetAttributes (C++ API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost, + * ::cudaThreadGetCacheConfig, + * ::cudaThreadSetCacheConfig + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetCacheConfig( + T *func, + enum cudaFuncCache cacheConfig +) +{ + return ::cudaFuncSetCacheConfig((const void*)func, cacheConfig); +} + +template +static __inline__ __host__ cudaError_t cudaFuncSetSharedMemConfig( + T *func, + enum cudaSharedMemConfig config +) +{ + return ::cudaFuncSetSharedMemConfig((const void*)func, config); +} + +#endif // __CUDACC__ + +/** + * \brief Returns occupancy for a device function + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessor( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, cudaOccupancyDefault); +} + +/** + * \brief Returns occupancy for a device function with the specified flags + * + * Returns in \p *numBlocks the maximum number of active blocks per + * streaming multiprocessor for the device function. + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * + * - ::cudaOccupancyDisableCachingOverride: suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param numBlocks - Returned occupancy + * \param func - Kernel function for which occupancy is calulated + * \param blockSize - Block size the kernel is intended to be launched with + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + int *numBlocks, + T func, + int blockSize, + size_t dynamicSMemSize, + unsigned int flags) +{ + return ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, (const void*)func, blockSize, dynamicSMemSize, flags); +} + +/** + * Helper functor for cudaOccupancyMaxPotentialBlockSize + */ +class __cudaOccupancyB2DHelper { + size_t n; +public: + inline __host__ CUDART_DEVICE __cudaOccupancyB2DHelper(size_t n_) : n(n_) {} + inline __host__ CUDART_DEVICE size_t operator()(int) + { + return n; + } +}; + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handled. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. + * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. 
+ * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + cudaError_t status; + + // Device and function properties + int device; + struct cudaFuncAttributes attr; + + // Limits + int maxThreadsPerMultiProcessor; + int warpSize; + int devMaxThreadsPerBlock; + int multiProcessorCount; + int funcMaxThreadsPerBlock; + int occupancyLimit; + int granularity; + + // Recorded maximum + int maxBlockSize = 0; + int numBlocks = 0; + int maxOccupancy = 0; + + // Temporary + int blockSizeToTryAligned; + int blockSizeToTry; + int blockSizeLimitAligned; + int occupancyInBlocks; + int occupancyInThreads; + size_t dynamicSMemSize; + + /////////////////////////// + // Check user input + /////////////////////////// + + if (!minGridSize || !blockSize || !func) { + return cudaErrorInvalidValue; + } + + ////////////////////////////////////////////// + // Obtain device and function properties + ////////////////////////////////////////////// + + status = ::cudaGetDevice(&device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &maxThreadsPerMultiProcessor, + cudaDevAttrMaxThreadsPerMultiProcessor, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &warpSize, + cudaDevAttrWarpSize, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &devMaxThreadsPerBlock, + cudaDevAttrMaxThreadsPerBlock, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaDeviceGetAttribute( + &multiProcessorCount, + cudaDevAttrMultiProcessorCount, + device); + if (status != cudaSuccess) { + return status; + } + + status = cudaFuncGetAttributes(&attr, func); + if (status != cudaSuccess) { + return status; + } + + funcMaxThreadsPerBlock = attr.maxThreadsPerBlock; + + ///////////////////////////////////////////////////////////////////////////////// + // Try each block size, and pick the block size with maximum occupancy + ///////////////////////////////////////////////////////////////////////////////// + + occupancyLimit = maxThreadsPerMultiProcessor; + granularity = warpSize; + + if (blockSizeLimit == 0) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (devMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = devMaxThreadsPerBlock; + } + + if (funcMaxThreadsPerBlock < blockSizeLimit) { + blockSizeLimit = funcMaxThreadsPerBlock; + } + + blockSizeLimitAligned = ((blockSizeLimit + (granularity - 1)) / granularity) * granularity; + + for (blockSizeToTryAligned = blockSizeLimitAligned; blockSizeToTryAligned > 0; blockSizeToTryAligned -= granularity) { + // This is needed for the first iteration, because + // blockSizeLimitAligned could be greater than 
blockSizeLimit + // + if (blockSizeLimit < blockSizeToTryAligned) { + blockSizeToTry = blockSizeLimit; + } else { + blockSizeToTry = blockSizeToTryAligned; + } + + dynamicSMemSize = blockSizeToDynamicSMemSize(blockSizeToTry); + + status = cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( + &occupancyInBlocks, + func, + blockSizeToTry, + dynamicSMemSize, + flags); + + if (status != cudaSuccess) { + return status; + } + + occupancyInThreads = blockSizeToTry * occupancyInBlocks; + + if (occupancyInThreads > maxOccupancy) { + maxBlockSize = blockSizeToTry; + numBlocks = occupancyInBlocks; + maxOccupancy = occupancyInThreads; + } + + // Early out if we have reached the maximum + // + if (occupancyLimit == maxOccupancy) { + break; + } + } + + /////////////////////////// + // Return best available + /////////////////////////// + + // Suggested min grid size to achieve a full machine launch + // + *minGridSize = numBlocks * multiProcessorCount; + *blockSize = maxBlockSize; + + return status; +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param blockSizeToDynamicSMemSize - A unary function / functor that takes block size, and returns the size, in bytes, of dynamic shared memory needed for a block + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ + +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeVariableSMem( + int *minGridSize, + int *blockSize, + T func, + UnaryFunction blockSizeToDynamicSMemSize, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, blockSizeToDynamicSMemSize, blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns grid and block size that achieves maximum potential occupancy for a device function + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. 
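+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of picking a launch configuration with this function; the __global__
+ * function myKernel and the surrounding launcher are placeholders, and error
+ * checking is omitted:
+ * \code
+ * __global__ void myKernel(int *data, int n);
+ *
+ * void launchWithMaxOccupancy(int *data, int n)
+ * {
+ *     int minGridSize = 0;
+ *     int blockSize   = 0;
+ *     cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, myKernel);
+ *
+ *     // Round the grid up so that every element of the input is covered.
+ *     int gridSize = (n + blockSize - 1) / blockSize;
+ *     myKernel<<<gridSize, blockSize>>>(data, n);
+ * }
+ * \endcode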
+ * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSize( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, cudaOccupancyDefault); +} + +/** + * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM. + * + * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. + * + * \param dynamicSmemSize - Returned maximum dynamic shared memory + * \param func - Kernel function for which occupancy is calculated + * \param numBlocks - Number of blocks to fit on SM + * \param blockSize - Size of the block + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxPotentialBlockSizeWithFlags + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyAvailableDynamicSMemPerBlock( + size_t *dynamicSmemSize, + T func, + int numBlocks, + int blockSize) +{ + return ::cudaOccupancyAvailableDynamicSMemPerBlock(dynamicSmemSize, (const void*)func, numBlocks, blockSize); +} + +/** + * \brief Returns grid and block size that achived maximum potential occupancy for a device function with the specified flags + * + * Returns in \p *minGridSize and \p *blocksize a suggested grid / + * block size pair that achieves the best potential occupancy + * (i.e. the maximum number of active warps with the smallest number + * of blocks). + * + * The \p flags parameter controls how special cases are handle. Valid flags include: + * + * - ::cudaOccupancyDefault: keeps the default behavior as + * ::cudaOccupancyMaxPotentialBlockSize + * + * - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior + * on platform where global caching affects occupancy. On such platforms, if caching + * is enabled, but per-block SM resource usage would result in zero occupancy, the + * occupancy calculator will calculate the occupancy as if caching is disabled. 
+ * Setting this flag makes the occupancy calculator to return 0 in such cases. + * More information can be found about this feature in the "Unified L1/Texture Cache" + * section of the Maxwell tuning guide. + * + * Use \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem if the + * amount of per-block dynamic shared memory changes with different + * block sizes. + * + * \param minGridSize - Returned minimum grid size needed to achieve the best potential occupancy + * \param blockSize - Returned block size + * \param func - Device function symbol + * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes + * \param blockSizeLimit - The maximum block size \p func is designed to work with. 0 means no limit. + * \param flags - Requested behavior for the occupancy calculator + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDevice, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa ::cudaOccupancyMaxPotentialBlockSize + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor + * \sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMem + * \sa ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags + * \sa ::cudaOccupancyAvailableDynamicSMemPerBlock + */ +template +static __inline__ __host__ CUDART_DEVICE cudaError_t cudaOccupancyMaxPotentialBlockSizeWithFlags( + int *minGridSize, + int *blockSize, + T func, + size_t dynamicSMemSize = 0, + int blockSizeLimit = 0, + unsigned int flags = 0) +{ + return cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(minGridSize, blockSize, func, __cudaOccupancyB2DHelper(dynamicSMemSize), blockSizeLimit, flags); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum cluster size in \p *clusterSize. + * + * The cluster dimensions in \p config are ignored. If func has a required + * cluster size set (see ::cudaFuncGetAttributes),\p *clusterSize will reflect + * the required cluster size. + * + * By default this function will always return a value that's portable on + * future hardware. A higher value may be returned if the kernel function + * allows non-portable cluster sizes. + * + * This function will respect the compile time launch bounds. + * + * \param clusterSize - Returned maximum cluster size that can be launched + * for the given kernel function and launch configuration + * \param func - Kernel function for which maximum cluster + * size is calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxPotentialClusterSize( + int *clusterSize, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxPotentialClusterSize(clusterSize, (const void*)func, config); +} + +/** + * \brief Given the kernel function (\p func) and launch configuration + * (\p config), return the maximum number of clusters that could co-exist + * on the target device in \p *numClusters. + * + * If the function has required cluster size already set (see + * ::cudaFuncGetAttributes), the cluster size from config must either be + * unspecified or match the required size. 
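+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of querying the maximum cluster size with ::cudaOccupancyMaxPotentialClusterSize,
+ * documented above; the __global__ function clusterKernel and the launch
+ * configuration values are placeholders, and error checking is omitted:
+ * \code
+ * __global__ void clusterKernel(float *data);
+ *
+ * void queryClusterSize(void)
+ * {
+ *     cudaLaunchConfig_t config = {};
+ *     config.gridDim  = dim3(128, 1, 1);
+ *     config.blockDim = dim3(256, 1, 1);
+ *
+ *     int clusterSize = 0;
+ *     // The cluster dimensions in config are ignored for this query.
+ *     cudaOccupancyMaxPotentialClusterSize(&clusterSize, clusterKernel, &config);
+ * }
+ * \endcode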
+ * Without required sizes, the cluster size must be specified in config, + * else the function will return an error. + * + * Note that various attributes of the kernel function may affect occupancy + * calculation. Runtime environment may affect how the hardware schedules + * the clusters, so the calculated occupancy is not guaranteed to be achievable. + * + * \param numClusters - Returned maximum number of clusters that + * could co-exist on the target device + * \param func - Kernel function for which maximum number + * of clusters are calculated + * \param config - Launch configuration for the given kernel function + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue, + * ::cudaErrorInvalidClusterSize, + * ::cudaErrorUnknown, + * \notefnerr + * \note_init_rt + * \note_callback + * + * \sa + * ::cudaFuncGetAttributes + */ +template +static __inline__ __host__ cudaError_t cudaOccupancyMaxActiveClusters( + int *numClusters, + T *func, + const cudaLaunchConfig_t *config) +{ + return ::cudaOccupancyMaxActiveClusters(numClusters, (const void*)func, config); +} + +#if defined __CUDACC__ + +/** + * \brief \hl Find out attributes for a given function + * + * This function obtains the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The fetched attributes are placed in \p attr. If the specified + * function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * + * Note that some function attributes such as + * \ref ::cudaFuncAttributes::maxThreadsPerBlock "maxThreadsPerBlock" + * may vary based on the device that is currently being used. + * + * \param attr - Return pointer to function's attributes + * \param entry - Function to get attributes of + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncGetAttributes( + struct cudaFuncAttributes *attr, + T *entry +) +{ + return ::cudaFuncGetAttributes(attr, (const void*)entry); +} + +/** + * \brief \hl Set attributes for a given function + * + * This function sets the attributes of a function specified via \p entry. + * The parameter \p entry must be a pointer to a function that executes + * on the device. The parameter specified by \p entry must be declared as a \p __global__ + * function. The enumeration defined by \p attr is set to the value defined by \p value. + * If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned. + * If the specified attribute cannot be written, or if the value is incorrect, + * then ::cudaErrorInvalidValue is returned. + * + * Valid values for \p attr are: + * - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. 
The sum of this value and the function attribute ::sharedSizeBytes + * cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture. + * - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources, + * this sets the shared memory carveout preference, in percent of the total shared memory. See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor. + * This is only a hint, and the driver can choose a different ratio if required to execute the function. + * - ::cudaFuncAttributeRequiredClusterWidth: The required cluster width in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterHeight: The required cluster height in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeRequiredClusterDepth: The required cluster depth in + * blocks. The width, height, and depth values must either all be 0 or all be + * positive. The validity of the cluster dimensions is checked at launch time. + * If the value is set during compile time, it cannot be set at runtime. + * Setting it at runtime will return cudaErrorNotPermitted. + * - ::cudaFuncAttributeClusterSchedulingPolicyPreference: The block + * scheduling policy of a function. The value type is cudaClusterSchedulingPolicy. + * + * \param entry - Function to get attributes of + * \param attr - Attribute to set + * \param value - Value to set + * + * \return + * ::cudaSuccess, + * ::cudaErrorInvalidDeviceFunction, + * ::cudaErrorInvalidValue + * \notefnerr + * \note_init_rt + * \note_callback + * + * \ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) "cudaLaunchKernel (C++ API)", + * \ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) "cudaFuncSetCacheConfig (C++ API)", + * \ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) "cudaFuncGetAttributes (C API)", + * ::cudaSetDoubleForDevice, + * ::cudaSetDoubleForHost + */ +template +static __inline__ __host__ cudaError_t cudaFuncSetAttribute( + T *entry, + enum cudaFuncAttribute attr, + int value +) +{ + return ::cudaFuncSetAttribute((const void*)entry, attr, value); +} + +/** + * \brief Get pointer to device kernel that matches entry function \p entryFuncAddr + * + * Returns in \p kernelPtr the device kernel corresponding to the entry function \p entryFuncAddr. 
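+ *
+ * \par
+ * An illustrative, editorial sketch (not part of the original NVIDIA header)
+ * of obtaining a ::cudaKernel_t handle from a __global__ entry function; the
+ * function myKernel and the surrounding helper are placeholders, and error
+ * checking is omitted:
+ * \code
+ * __global__ void myKernel(float *data);
+ *
+ * void lookupKernel(void)
+ * {
+ *     cudaKernel_t kernel;
+ *     cudaGetKernel(&kernel, myKernel);
+ *     // kernel can now be passed to runtime APIs that accept a cudaKernel_t handle.
+ * }
+ * \endcode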
+ * + * \param kernelPtr - Returns the device kernel + * \param entryFuncAddr - Address of device entry function to search kernel for + * + * \return + * ::cudaSuccess + * + * \sa + * \ref ::cudaGetKernel(cudaKernel_t *kernelPtr, const void *entryFuncAddr) "cudaGetKernel (C API)" + */ +template +static __inline__ __host__ cudaError_t cudaGetKernel( + cudaKernel_t *kernelPtr, + const T *entryFuncAddr +) +{ + return ::cudaGetKernel(kernelPtr, (const void *)entryFuncAddr); +} + +#endif /* __CUDACC__ */ + +/** @} */ /* END CUDART_HIGHLEVEL */ + +#endif /* __cplusplus && !__CUDACC_RTC__ */ + +#if !defined(__CUDACC_RTC__) +#if defined(__GNUC__) +#if defined(__clang__) || (!defined(__PGIC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) +#pragma GCC diagnostic pop +#endif +#elif defined(_MSC_VER) +#pragma warning(pop) +#endif +#endif + +#undef __CUDA_DEPRECATED + +#if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__) +#undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__ +#undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_CUDA_RUNTIME_H__ +#endif + +#endif /* !__CUDA_RUNTIME_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h new file mode 100644 index 0000000000000000000000000000000000000000..8f552db8faab7d21e90e06a1ea2184a5563d3bf2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/device_launch_parameters.h @@ -0,0 +1,118 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__DEVICE_LAUNCH_PARAMETERS_H__) +#define __DEVICE_LAUNCH_PARAMETERS_H__ + +#include "vector_types.h" + +#if !defined(__STORAGE__) + +#if defined(__CUDACC_RTC__) +#define __STORAGE__ \ + extern const __device__ +#else /* !__CUDACC_RTC__ */ +#define __STORAGE__ \ + extern const +#endif /* __CUDACC_RTC__ */ + +#endif /* __STORAGE__ */ + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +uint3 __device_builtin__ __STORAGE__ threadIdx; +uint3 __device_builtin__ __STORAGE__ blockIdx; +dim3 __device_builtin__ __STORAGE__ blockDim; +dim3 __device_builtin__ __STORAGE__ gridDim; +int __device_builtin__ __STORAGE__ warpSize; + +#undef __STORAGE__ + +#if defined(__cplusplus) +} +#endif /* __cplusplus */ + +#if !defined(__cudaGet_threadIdx) + +#define __cudaGet_threadIdx() \ + threadIdx + +#endif /* __cudaGet_threadIdx */ + +#if !defined(__cudaGet_blockIdx) + +#define __cudaGet_blockIdx() \ + blockIdx + +#endif /* __cudaGet_blockIdx */ + +#if !defined(__cudaGet_blockDim) + +#define __cudaGet_blockDim() \ + blockDim + +#endif /* __cudaGet_blockDim */ + +#if !defined(__cudaGet_gridDim) + +#define __cudaGet_gridDim() \ + gridDim + +#endif /* __cudaGet_gridDim */ + +#if !defined(__cudaGet_warpSize) + +#define __cudaGet_warpSize() \ + warpSize + +#endif /* __cudaGet_warpSize */ + +#endif /* !__DEVICE_LAUNCH_PARAMETERS_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ac4aa9bfc6b8d5d4d240e05a2fd557889f30c47f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_atomic_functions.hpp @@ -0,0 +1,85 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_20_ATOMIC_FUNCTIONS_HPP__) +#define __SM_20_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_ATOMIC_FUNCTIONS_DECL__ float atomicAdd(float *address, float val) +{ + return __fAtomicAdd(address, val); +} + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_20_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_20_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..30c1ab99e0d66ebbceb8fe88b1122443cbf5f998 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_20_intrinsics.hpp @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. 
Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_20_INTRINSICS_HPP__) +#define __SM_20_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_20_INTRINSICS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_20_INTRINSICS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_20_INTRINSICS_DECL__ unsigned int ballot(bool pred) +{ + return __ballot((int)pred); +} + +__SM_20_INTRINSICS_DECL__ int syncthreads_count(bool pred) +{ + return __syncthreads_count((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_and(bool pred) +{ + return (bool)__syncthreads_and((int)pred); +} + +__SM_20_INTRINSICS_DECL__ bool syncthreads_or(bool pred) +{ + return (bool)__syncthreads_or((int)pred); +} + + +extern "C" { + __device__ unsigned __nv_isGlobal_impl(const void *); + __device__ unsigned __nv_isShared_impl(const void *); + __device__ unsigned __nv_isConstant_impl(const void *); + __device__ unsigned __nv_isLocal_impl(const void *); + __device__ unsigned __nv_isGridConstant_impl(const void *); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isGlobal(const void *ptr) +{ + return __nv_isGlobal_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isShared(const void *ptr) +{ + return __nv_isShared_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isConstant(const void *ptr) +{ + return __nv_isConstant_impl(ptr); +} + +__SM_20_INTRINSICS_DECL__ unsigned int __isLocal(const void *ptr) +{ + return __nv_isLocal_impl(ptr); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +__SM_20_INTRINSICS_DECL__ unsigned int __isGridConstant(const void *ptr) +{ + return __nv_isGridConstant_impl(ptr); +} +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + +extern "C" { + __device__ size_t __nv_cvta_generic_to_global_impl(const void *); + __device__ size_t __nv_cvta_generic_to_shared_impl(const void *); + __device__ size_t __nv_cvta_generic_to_constant_impl(const void *); + __device__ size_t __nv_cvta_generic_to_local_impl(const void *); + __device__ void * __nv_cvta_global_to_generic_impl(size_t); + __device__ void * __nv_cvta_shared_to_generic_impl(size_t); + __device__ void * __nv_cvta_constant_to_generic_impl(size_t); + __device__ void * __nv_cvta_local_to_generic_impl(size_t); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_global(const void *p) +{ + return __nv_cvta_generic_to_global_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_shared(const void *p) +{ + return __nv_cvta_generic_to_shared_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_constant(const void *p) +{ + return __nv_cvta_generic_to_constant_impl(p); +} + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_local(const void *p) +{ + return __nv_cvta_generic_to_local_impl(p); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_global_to_generic(size_t rawbits) +{ + return __nv_cvta_global_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_shared_to_generic(size_t rawbits) +{ + return __nv_cvta_shared_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_constant_to_generic(size_t rawbits) +{ + 
return __nv_cvta_constant_to_generic_impl(rawbits); +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_local_to_generic(size_t rawbits) +{ + return __nv_cvta_local_to_generic_impl(rawbits); +} + +#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __CVTA_PTR_64 1 +#endif + +__SM_20_INTRINSICS_DECL__ size_t __cvta_generic_to_grid_constant(const void *ptr) +{ +#if __CVTA_PTR_64 + unsigned long long ret; + asm("cvta.to.param.u64 %0, %1;" : "=l"(ret) : "l"(ptr)); +#else /* !__CVTA_PTR_64 */ + unsigned ret; + asm("cvta.to.param.u32 %0, %1;" : "=r"(ret) : "r"(ptr)); +#endif /* __CVTA_PTR_64 */ + return (size_t)ret; + +} + +__SM_20_INTRINSICS_DECL__ void * __cvta_grid_constant_to_generic(size_t rawbits) +{ + void *ret; +#if __CVTA_PTR_64 + unsigned long long in = rawbits; + asm("cvta.param.u64 %0, %1;" : "=l"(ret) : "l"(in)); +#else /* !__CVTA_PTR_64 */ + unsigned in = rawbits; + asm("cvta.param.u32 %0, %1;" : "=r"(ret) : "r"(in)); +#endif /* __CVTA_PTR_64 */ + return ret; +} +#undef __CVTA_PTR_64 +#endif /* !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) */ + + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_20_INTRINSICS_DECL__ + +#endif /* !__SM_20_INTRINSICS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..c2efc2e248350f5800f56e44348ebbc9b04b480c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.h @@ -0,0 +1,221 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_H__) +#define __SM_30_INTRINSICS_H__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_30_INTRINSICS_DECL__ extern __device__ __cudart_builtin__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + +/******************************************************************************* +* * +* Below are declarations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +#if defined(_WIN32) +# define __DEPRECATED__(msg) __declspec(deprecated(msg)) +#elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__)))) +# define __DEPRECATED__(msg) __attribute__((deprecated)) +#else +# define __DEPRECATED__(msg) __attribute__((deprecated(msg))) +#endif + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is deprecated in favor of "#x"_sync() and may be removed in a future release (Use -Wno-deprecated-declarations to suppress this warning)." +#elif defined(_NVHPC_CUDA) +#define __WSB_DEPRECATION_MESSAGE(x) #x"() is not valid on cc70 and above, and should be replaced with "#x"_sync()." 
+#endif + +__SM_30_INTRINSICS_DECL__ unsigned __fns(unsigned mask, unsigned base, int offset) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync(unsigned id) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __barrier_sync_count(unsigned id, unsigned cnt) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ void __syncwarp(unsigned mask=0xFFFFFFFF) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __all_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __any_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __uni_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __ballot_sync(unsigned mask, int pred) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned __activemask() __DEF_IF_HOST + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) int __shfl(int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned int __shfl(unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) int __shfl_up(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned int __shfl_up(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) int __shfl_down(int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned int __shfl_down(unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) int __shfl_xor(int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned int __shfl_xor(unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) float __shfl(float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) float __shfl_up(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) float __shfl_down(float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) float __shfl_xor(float var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned 
mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// 64-bits SHFL +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long long __shfl(unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long long __shfl(long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long long __shfl_up(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long long __shfl_down(long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long long __shfl_xor(long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) double __shfl(double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) double __shfl_up(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) double __shfl_down(double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) double __shfl_xor(double var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long 
__shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width=warpSize) __DEF_IF_HOST + +// long needs some help to choose between 32-bits and 64-bits +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) long __shfl(long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl)) unsigned long __shfl(unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) long __shfl_up(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_up)) unsigned long __shfl_up(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) long __shfl_down(long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_down)) unsigned long __shfl_down(unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) long __shfl_xor(long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ __DEPRECATED__(__WSB_DEPRECATION_MESSAGE(__shfl_xor)) unsigned long __shfl_xor(unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST +#endif + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned 
int delta, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width=warpSize) __DEF_IF_HOST +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width=warpSize) __DEF_IF_HOST + +#undef __DEPRECATED__ +#undef __WSB_DEPRECATION_MESSAGE + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __DEF_IF_HOST +#undef __SM_30_INTRINSICS_DECL__ + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_30_intrinsics.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_30_INTRINSICS_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..a5bcac5ee68c0cf547e4de7c08badf37106639dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_30_intrinsics.hpp @@ -0,0 +1,604 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_30_INTRINSICS_HPP__) +#define __SM_30_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_30_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_30_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 300 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +// In here are intrinsics which are built in to the compiler. These may be +// referenced by intrinsic implementations from this file. +extern "C" +{ +} + +/******************************************************************************* +* * +* Below are implementations of SM-3.0 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +#if !defined warpSize && !defined __local_warpSize +#define warpSize 32 +#define __local_warpSize +#endif + +__SM_30_INTRINSICS_DECL__ +unsigned __fns(unsigned mask, unsigned base, int offset) { + extern __device__ __device_builtin__ unsigned int __nvvm_fns(unsigned int mask, unsigned int base, int offset); + return __nvvm_fns(mask, base, offset); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync(unsigned id) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync(unsigned id); + return __nvvm_barrier_sync(id); +} + +__SM_30_INTRINSICS_DECL__ +void __barrier_sync_count(unsigned id, unsigned cnt) { + extern __device__ __device_builtin__ void __nvvm_barrier_sync_cnt(unsigned id, unsigned cnt); + return __nvvm_barrier_sync_cnt(id, cnt); +} + +__SM_30_INTRINSICS_DECL__ +void __syncwarp(unsigned mask) { + extern __device__ __device_builtin__ void __nvvm_bar_warp_sync(unsigned mask); + return __nvvm_bar_warp_sync(mask); +} + +__SM_30_INTRINSICS_DECL__ +int __all_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_all_sync(unsigned int mask, int pred); + return __nvvm_vote_all_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __any_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_any_sync(unsigned int mask, int pred); + return __nvvm_vote_any_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +int __uni_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ int __nvvm_vote_uni_sync(unsigned int mask, int pred); + return __nvvm_vote_uni_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __ballot_sync(unsigned mask, int pred) { + extern __device__ __device_builtin__ unsigned int __nvvm_vote_ballot_sync(unsigned int mask, int pred); + return __nvvm_vote_ballot_sync(mask, pred); +} + +__SM_30_INTRINSICS_DECL__ +unsigned __activemask() { + unsigned ret; + asm volatile ("activemask.b32 %0;" : "=r"(ret)); + return ret; +} + +// These are removed starting with compute_70 and onwards +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 + +__SM_30_INTRINSICS_DECL__ int __shfl(int var, int srcLane, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile 
("shfl.idx.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl(unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl((int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up(int var, unsigned int delta, int width) { + int ret; + int c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down(int var, unsigned int delta, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down(unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down((int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor(int var, int laneMask, int width) { + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(var), "r"(laneMask), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor(unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor((int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl(float var, int srcLane, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.idx.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(srcLane), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up(float var, unsigned int delta, int width) { + float ret; + int c; + c = (warpSize-width) << 8; + asm volatile ("shfl.up.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down(float var, unsigned int delta, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.down.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(delta), "r"(c)); + return ret; +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor(float var, int laneMask, int width) { + float ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + asm volatile ("shfl.bfly.b32 %0, %1, %2, %3;" : "=f"(ret) : "f"(var), "r"(laneMask), "r"(c)); + return ret; +} + +// 64-bits SHFL + +__SM_30_INTRINSICS_DECL__ long long __shfl(long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl(unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl((long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up((long long) var, delta, 
width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down(long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down(unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down((long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor(long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor(unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor((long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl(double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl(hi, srcLane, width); + lo = __shfl(lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up(hi, delta, width); + lo = __shfl_up(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down(double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down(hi, delta, width); + lo = __shfl_down(lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor(double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor(hi, laneMask, width); + lo = __shfl_xor(lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ long __shfl(long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((long long) var, srcLane, width) : + __shfl((int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl(unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl((unsigned long long) var, srcLane, width) : + __shfl((unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((long long) var, delta, width) : + __shfl_up((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up((unsigned long long) var, delta, width) : + __shfl_up((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down(long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down((long long) var, delta, width) : + __shfl_down((int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down(unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down((unsigned long long) var, delta, width) : + __shfl_down((unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor(long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((long long) var, laneMask, width) : + __shfl_xor((int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor(unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor((unsigned long long) var, laneMask, width) : + __shfl_xor((unsigned int) var, laneMask, width); +} + +#endif /* defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700 */ + +// Warp register exchange (shuffle) intrinsics. +// Notes: +// a) Warp size is hardcoded to 32 here, because the compiler does not know +// the "warpSize" constant at this time +// b) we cannot map the float __shfl to the int __shfl because it'll mess with +// the register number (especially if you're doing two shfls to move a double). +__SM_30_INTRINSICS_DECL__ int __shfl_sync(unsigned mask, int var, int srcLane, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, var, srcLane, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_sync(unsigned mask, unsigned int var, int srcLane, int width) { + return (unsigned int) __shfl_sync(mask, (int)var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_up_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_up_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_up_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_down_sync(unsigned mask, int var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, var, delta, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_down_sync(unsigned mask, unsigned int var, unsigned int delta, int width) { + return (unsigned int) __shfl_down_sync(mask, (int)var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ int __shfl_xor_sync(unsigned mask, int var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, var, laneMask, c); + return ret; +} + +__SM_30_INTRINSICS_DECL__ unsigned int __shfl_xor_sync(unsigned mask, unsigned int var, int laneMask, int width) { + return (unsigned int) __shfl_xor_sync(mask, (int)var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_sync(unsigned mask, float var, int srcLane, int width) { + extern __device__ __device_builtin__ 
unsigned __nvvm_shfl_idx_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_idx_sync(mask, __float_as_int(var), srcLane, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_up_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_up_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = (warpSize-width) << 8; + ret = __nvvm_shfl_up_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_down_sync(unsigned mask, float var, unsigned int delta, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_down_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_down_sync(mask, __float_as_int(var), delta, c); + return __int_as_float(ret); +} + +__SM_30_INTRINSICS_DECL__ float __shfl_xor_sync(unsigned mask, float var, int laneMask, int width) { + extern __device__ __device_builtin__ unsigned __nvvm_shfl_bfly_sync(unsigned mask, unsigned a, unsigned b, unsigned c); + int ret; + int c; + c = ((warpSize-width) << 8) | 0x1f; + ret = __nvvm_shfl_bfly_sync(mask, __float_as_int(var), laneMask, c); + return __int_as_float(ret); +} + +// 64-bits SHFL +__SM_30_INTRINSICS_DECL__ long long __shfl_sync(unsigned mask, long long var, int srcLane, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_sync(unsigned mask, unsigned long long var, int srcLane, int width) { + return (unsigned long long) __shfl_sync(mask, (long long) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_up_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_up_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_up_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_down_sync(unsigned mask, long long var, unsigned int delta, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_down_sync(unsigned mask, unsigned long long var, unsigned int delta, int width) { + return (unsigned long long) __shfl_down_sync(mask, (long long) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long long __shfl_xor_sync(unsigned mask, long long var, int laneMask, int width) { + int lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, 
{%1,%2};" : "=l"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ unsigned long long __shfl_xor_sync(unsigned mask, unsigned long long var, int laneMask, int width) { + return (unsigned long long) __shfl_xor_sync(mask, (long long) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ double __shfl_sync(unsigned mask, double var, int srcLane, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_sync(mask, hi, srcLane, width); + lo = __shfl_sync(mask, lo, srcLane, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_up_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_up_sync(mask, hi, delta, width); + lo = __shfl_up_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_down_sync(unsigned mask, double var, unsigned int delta, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_down_sync(mask, hi, delta, width); + lo = __shfl_down_sync(mask, lo, delta, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +__SM_30_INTRINSICS_DECL__ double __shfl_xor_sync(unsigned mask, double var, int laneMask, int width) { + unsigned lo, hi; + asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(var)); + hi = __shfl_xor_sync(mask, hi, laneMask, width); + lo = __shfl_xor_sync(mask, lo, laneMask, width); + asm volatile("mov.b64 %0, {%1,%2};" : "=d"(var) : "r"(lo), "r"(hi)); + return var; +} + +// long needs some help to choose between 32-bits and 64-bits + +__SM_30_INTRINSICS_DECL__ long __shfl_sync(unsigned mask, long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (long long) var, srcLane, width) : + __shfl_sync(mask, (int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_sync(unsigned mask, unsigned long var, int srcLane, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_sync(mask, (unsigned long long) var, srcLane, width) : + __shfl_sync(mask, (unsigned int) var, srcLane, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_up_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (long long) var, delta, width) : + __shfl_up_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_up_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_up_sync(mask, (unsigned long long) var, delta, width) : + __shfl_up_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_down_sync(unsigned mask, long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_down_sync(mask, (long long) var, delta, width) : + __shfl_down_sync(mask, (int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_down_sync(unsigned mask, unsigned long var, unsigned int delta, int width) { + return (sizeof(long) == sizeof(long long)) ? 
+ __shfl_down_sync(mask, (unsigned long long) var, delta, width) : + __shfl_down_sync(mask, (unsigned int) var, delta, width); +} + +__SM_30_INTRINSICS_DECL__ long __shfl_xor_sync(unsigned mask, long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (long long) var, laneMask, width) : + __shfl_xor_sync(mask, (int) var, laneMask, width); +} + +__SM_30_INTRINSICS_DECL__ unsigned long __shfl_xor_sync(unsigned mask, unsigned long var, int laneMask, int width) { + return (sizeof(long) == sizeof(long long)) ? + __shfl_xor_sync(mask, (unsigned long long) var, laneMask, width) : + __shfl_xor_sync(mask, (unsigned int) var, laneMask, width); +} + +#if defined(__local_warpSize) +#undef warpSize +#undef __local_warpSize +#endif + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 300 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_30_INTRINSICS_DECL__ + +#endif /* !__SM_30_INTRINSICS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp new file mode 100644 index 0000000000000000000000000000000000000000..d50f9cea5c4d89bc555855a8ca73d617bcfa461a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_32_intrinsics.hpp @@ -0,0 +1,588 @@ +/* + * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. 
Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_32_INTRINSICS_HPP__) +#define __SM_32_INTRINSICS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_32_INTRINSICS_DECL__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __SM_32_INTRINSICS_DECL__ static __device__ __inline__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +// In here are intrinsics which are built in to the compiler. These may be +// referenced by intrinsic implementations from this file. +extern "C" +{ + // There are no intrinsics built in to the compiler for SM-3.5, + // all intrinsics are now implemented as inline PTX below. +} + +/******************************************************************************* +* * +* Below are implementations of SM-3.5 intrinsics which are included as * +* source (instead of being built in to the compiler) * +* * +*******************************************************************************/ + +// LDG is a "load from global via texture path" command which can exhibit higher +// bandwidth on GK110 than a regular LD. +// Define a different pointer storage size for 64 and 32 bit +#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) || defined(__CUDACC_RTC__) +#define __LDG_PTR "l" +#else +#define __LDG_PTR "r" +#endif + +/****************************************************************************** + * __ldg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
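/*
 * [Editor's aside -- not part of the vendored NVIDIA header or of this diff.]
 * A minimal usage sketch for the __ldg() overloads defined in this section,
 * which route loads through the read-only / non-coherent cache path
 * (ld.global.nc, SM 3.5+). The kernel and buffer names are hypothetical, and
 * the sketch assumes the input buffer is not written by any thread for the
 * lifetime of the kernel, which is what makes the non-coherent path safe.
 */
__global__ void scale_readonly(const float *__restrict__ in, float *out, float k, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // built-ins from device_launch_parameters.h
    if (i < n) {
        out[i] = k * __ldg(&in[i]);  // load via the read-only data cache
    }
}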
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.nc.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.nc.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.nc.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.nc.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.nc.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.nc.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.nc.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.nc.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR 
(ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.nc.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.nc.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.nc.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.nc.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.nc.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.nc.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.nc.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.nc.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldg(const float *ptr) { float ret; asm volatile ("ld.global.nc.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldg(const double *ptr) { double ret; asm volatile ("ld.global.nc.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.nc.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.nc.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.nc.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + + +/****************************************************************************** + * __ldcg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
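/*
 * [Editor's aside -- not part of the vendored NVIDIA header or of this diff.]
 * A hedged sketch contrasting the __ldcg() overloads declared in this section
 * with __ldg(): __ldcg() lowers to ld.global.cg, which (per the PTX cache
 * operators) caches only at L2 and below, so it can suit data that is streamed
 * through once and should not displace the L1 / read-only working set. Kernel
 * and buffer names are hypothetical.
 */
__global__ void stream_copy(const float *in, float *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = __ldcg(&in[i]);  // L2-only (cache-global) load of a once-read element
    }
}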
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcg(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcg(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcg(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcg(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcg(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcg(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcg(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcg(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cg.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcg(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cg.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcg(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cg.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcg(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cg.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcg(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cg.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcg(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cg.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcg(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cg.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcg(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcg(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cg.u16 %0, [%1];" : 
"=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcg(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cg.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcg(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cg.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcg(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cg.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcg(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cg.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcg(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cg.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcg(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cg.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcg(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cg.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcg(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcg(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cg.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcg(const float *ptr) { float ret; asm volatile ("ld.global.cg.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcg(const double *ptr) { double ret; asm volatile ("ld.global.cg.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcg(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cg.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcg(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cg.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcg(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cg.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldca * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldca(const long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldca(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldca(const char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldca(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldca(const short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldca(const int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldca(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldca(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.ca.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldca(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.ca.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldca(const short2 *ptr) { short2 ret; asm volatile ("ld.global.ca.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldca(const short4 *ptr) { short4 ret; asm volatile ("ld.global.ca.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldca(const int2 *ptr) { int2 ret; asm volatile ("ld.global.ca.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldca(const int4 *ptr) { int4 ret; asm volatile ("ld.global.ca.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldca(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.ca.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldca(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldca(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.ca.u16 %0, [%1];" : 
"=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldca(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.ca.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldca(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.ca.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldca(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.ca.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldca(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.ca.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldca(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.ca.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldca(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.ca.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldca(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.ca.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldca(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.ca.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldca(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.ca.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldca(const float *ptr) { float ret; asm volatile ("ld.global.ca.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldca(const double *ptr) { double ret; asm volatile ("ld.global.ca.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldca(const float2 *ptr) { float2 ret; asm volatile ("ld.global.ca.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldca(const float4 *ptr) { float4 ret; asm volatile ("ld.global.ca.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldca(const double2 *ptr) { double2 ret; asm volatile ("ld.global.ca.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldcs * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcs(const long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcs(const unsigned long *ptr) { unsigned long ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcs(const char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcs(const signed char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcs(const short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr)); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcs(const int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcs(const long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcs(const char2 *ptr) { char2 ret; int2 tmp; asm volatile ("ld.global.cs.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcs(const char4 *ptr) { char4 ret; int4 tmp; asm volatile ("ld.global.cs.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcs(const short2 *ptr) { short2 ret; asm volatile ("ld.global.cs.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcs(const short4 *ptr) { short4 ret; asm volatile ("ld.global.cs.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcs(const int2 *ptr) { int2 ret; asm volatile ("ld.global.cs.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcs(const int4 *ptr) { int4 ret; asm volatile ("ld.global.cs.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcs(const longlong2 *ptr) { longlong2 ret; asm volatile ("ld.global.cs.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcs(const unsigned char *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcs(const unsigned short *ptr) { unsigned short ret; asm volatile ("ld.global.cs.u16 %0, [%1];" : 
"=h"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcs(const unsigned int *ptr) { unsigned int ret; asm volatile ("ld.global.cs.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcs(const unsigned long long *ptr) { unsigned long long ret; asm volatile ("ld.global.cs.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcs(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm volatile ("ld.global.cs.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcs(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm volatile ("ld.global.cs.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr)); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcs(const ushort2 *ptr) { ushort2 ret; asm volatile ("ld.global.cs.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcs(const ushort4 *ptr) { ushort4 ret; asm volatile ("ld.global.cs.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcs(const uint2 *ptr) { uint2 ret; asm volatile ("ld.global.cs.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcs(const uint4 *ptr) { uint4 ret; asm volatile ("ld.global.cs.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcs(const ulonglong2 *ptr) { ulonglong2 ret; asm volatile ("ld.global.cs.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr)); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcs(const float *ptr) { float ret; asm volatile ("ld.global.cs.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcs(const double *ptr) { double ret; asm volatile ("ld.global.cs.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcs(const float2 *ptr) { float2 ret; asm volatile ("ld.global.cs.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcs(const float4 *ptr) { float4 ret; asm volatile ("ld.global.cs.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr)); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcs(const double2 *ptr) { double2 ret; asm volatile ("ld.global.cs.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr)); return ret; } + +/****************************************************************************** + * __ldlu * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldlu(const long *ptr) { unsigned long ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldlu(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldlu(const char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldlu(const signed char *ptr) { unsigned int ret; asm ("ld.global.lu.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldlu(const short *ptr) { unsigned short ret; asm ("ld.global.lu.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldlu(const int *ptr) { unsigned int ret; asm ("ld.global.lu.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldlu(const long long *ptr) { unsigned long long ret; asm ("ld.global.lu.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldlu(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.lu.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldlu(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.lu.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldlu(const short2 *ptr) { short2 ret; asm ("ld.global.lu.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldlu(const short4 *ptr) { short4 ret; asm ("ld.global.lu.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldlu(const int2 *ptr) { int2 ret; asm ("ld.global.lu.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldlu(const int4 *ptr) { int4 ret; asm ("ld.global.lu.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldlu(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.lu.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldlu(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.lu.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldlu(const unsigned short *ptr) { unsigned short ret; asm 
("ld.global.lu.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldlu(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.lu.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldlu(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.lu.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldlu(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.lu.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldlu(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.lu.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldlu(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.lu.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldlu(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.lu.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldlu(const uint2 *ptr) { uint2 ret; asm ("ld.global.lu.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldlu(const uint4 *ptr) { uint4 ret; asm ("ld.global.lu.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldlu(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.lu.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldlu(const float *ptr) { float ret; asm ("ld.global.lu.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldlu(const double *ptr) { double ret; asm ("ld.global.lu.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldlu(const float2 *ptr) { float2 ret; asm ("ld.global.lu.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldlu(const float4 *ptr) { float4 ret; asm ("ld.global.lu.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldlu(const double2 *ptr) { double2 ret; asm ("ld.global.lu.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +/****************************************************************************** + * __ldcv * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ long __ldcv(const long *ptr) { unsigned long ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (long)ret; } +__SM_32_INTRINSICS_DECL__ unsigned long __ldcv(const unsigned long *ptr) { unsigned long ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +#endif + + +__SM_32_INTRINSICS_DECL__ char __ldcv(const char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (char)ret; } +__SM_32_INTRINSICS_DECL__ signed char __ldcv(const signed char *ptr) { unsigned int ret; asm ("ld.global.cv.s8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (signed char)ret; } +__SM_32_INTRINSICS_DECL__ short __ldcv(const short *ptr) { unsigned short ret; asm ("ld.global.cv.s16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return (short)ret; } +__SM_32_INTRINSICS_DECL__ int __ldcv(const int *ptr) { unsigned int ret; asm ("ld.global.cv.s32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (int)ret; } +__SM_32_INTRINSICS_DECL__ long long __ldcv(const long long *ptr) { unsigned long long ret; asm ("ld.global.cv.s64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return (long long)ret; } +__SM_32_INTRINSICS_DECL__ char2 __ldcv(const char2 *ptr) { char2 ret; int2 tmp; asm ("ld.global.cv.v2.s8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ char4 __ldcv(const char4 *ptr) { char4 ret; int4 tmp; asm ("ld.global.cv.v4.s8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (char)tmp.x; ret.y = (char)tmp.y; ret.z = (char)tmp.z; ret.w = (char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ short2 __ldcv(const short2 *ptr) { short2 ret; asm ("ld.global.cv.v2.s16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ short4 __ldcv(const short4 *ptr) { short4 ret; asm ("ld.global.cv.v4.s16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int2 __ldcv(const int2 *ptr) { int2 ret; asm ("ld.global.cv.v2.s32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ int4 __ldcv(const int4 *ptr) { int4 ret; asm ("ld.global.cv.v4.s32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ longlong2 __ldcv(const longlong2 *ptr) { longlong2 ret; asm ("ld.global.cv.v2.s64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ unsigned char __ldcv(const unsigned char *ptr) { unsigned int ret; asm ("ld.global.cv.u8 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return (unsigned char)ret; } +__SM_32_INTRINSICS_DECL__ unsigned short __ldcv(const unsigned short *ptr) { unsigned short ret; asm 
("ld.global.cv.u16 %0, [%1];" : "=h"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned int __ldcv(const unsigned int *ptr) { unsigned int ret; asm ("ld.global.cv.u32 %0, [%1];" : "=r"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ unsigned long long __ldcv(const unsigned long long *ptr) { unsigned long long ret; asm ("ld.global.cv.u64 %0, [%1];" : "=l"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uchar2 __ldcv(const uchar2 *ptr) { uchar2 ret; uint2 tmp; asm ("ld.global.cv.v2.u8 {%0,%1}, [%2];" : "=r"(tmp.x), "=r"(tmp.y) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; return ret; } +__SM_32_INTRINSICS_DECL__ uchar4 __ldcv(const uchar4 *ptr) { uchar4 ret; uint4 tmp; asm ("ld.global.cv.v4.u8 {%0,%1,%2,%3}, [%4];" : "=r"(tmp.x), "=r"(tmp.y), "=r"(tmp.z), "=r"(tmp.w) : __LDG_PTR (ptr) : "memory"); ret.x = (unsigned char)tmp.x; ret.y = (unsigned char)tmp.y; ret.z = (unsigned char)tmp.z; ret.w = (unsigned char)tmp.w; return ret; } +__SM_32_INTRINSICS_DECL__ ushort2 __ldcv(const ushort2 *ptr) { ushort2 ret; asm ("ld.global.cv.v2.u16 {%0,%1}, [%2];" : "=h"(ret.x), "=h"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ushort4 __ldcv(const ushort4 *ptr) { ushort4 ret; asm ("ld.global.cv.v4.u16 {%0,%1,%2,%3}, [%4];" : "=h"(ret.x), "=h"(ret.y), "=h"(ret.z), "=h"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint2 __ldcv(const uint2 *ptr) { uint2 ret; asm ("ld.global.cv.v2.u32 {%0,%1}, [%2];" : "=r"(ret.x), "=r"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ uint4 __ldcv(const uint4 *ptr) { uint4 ret; asm ("ld.global.cv.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ ulonglong2 __ldcv(const ulonglong2 *ptr) { ulonglong2 ret; asm ("ld.global.cv.v2.u64 {%0,%1}, [%2];" : "=l"(ret.x), "=l"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +__SM_32_INTRINSICS_DECL__ float __ldcv(const float *ptr) { float ret; asm ("ld.global.cv.f32 %0, [%1];" : "=f"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double __ldcv(const double *ptr) { double ret; asm ("ld.global.cv.f64 %0, [%1];" : "=d"(ret) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float2 __ldcv(const float2 *ptr) { float2 ret; asm ("ld.global.cv.v2.f32 {%0,%1}, [%2];" : "=f"(ret.x), "=f"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ float4 __ldcv(const float4 *ptr) { float4 ret; asm ("ld.global.cv.v4.f32 {%0,%1,%2,%3}, [%4];" : "=f"(ret.x), "=f"(ret.y), "=f"(ret.z), "=f"(ret.w) : __LDG_PTR (ptr) : "memory"); return ret; } +__SM_32_INTRINSICS_DECL__ double2 __ldcv(const double2 *ptr) { double2 ret; asm ("ld.global.cv.v2.f64 {%0,%1}, [%2];" : "=d"(ret.x), "=d"(ret.y) : __LDG_PTR (ptr) : "memory"); return ret; } + +/****************************************************************************** + * __stwb * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stwb(long *ptr, long value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long *ptr, unsigned long value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stwb(char *ptr, char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(signed char *ptr, signed char value) { asm ("st.global.wb.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short *ptr, short value) { asm ("st.global.wb.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int *ptr, int value) { asm ("st.global.wb.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(long long *ptr, long long value) { asm ("st.global.wb.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wb.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short2 *ptr, short2 value) { asm ("st.global.wb.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(short4 *ptr, short4 value) { asm ("st.global.wb.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int2 *ptr, int2 value) { asm ("st.global.wb.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(int4 *ptr, int4 value) { asm ("st.global.wb.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(longlong2 *ptr, longlong2 value) { asm ("st.global.wb.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned char *ptr, unsigned char value) { asm ("st.global.wb.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned short *ptr, unsigned short value) { asm ("st.global.wb.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned int *ptr, unsigned int value) { asm ("st.global.wb.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wb.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.wb.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wb.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ushort2 *ptr, ushort2 value) { asm ("st.global.wb.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ushort4 *ptr, ushort4 value) { asm ("st.global.wb.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uint2 *ptr, uint2 value) { asm ("st.global.wb.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(uint4 *ptr, uint4 value) { asm ("st.global.wb.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wb.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwb(float *ptr, float value) { asm ("st.global.wb.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(double *ptr, double value) { asm ("st.global.wb.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(float2 *ptr, float2 value) { asm ("st.global.wb.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(float4 *ptr, float4 value) { asm ("st.global.wb.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwb(double2 *ptr, double2 value) { asm ("st.global.wb.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stcg * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stcg(long *ptr, long value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long *ptr, unsigned long value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stcg(char *ptr, char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(signed char *ptr, signed char value) { asm ("st.global.cg.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short *ptr, short value) { asm ("st.global.cg.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int *ptr, int value) { asm ("st.global.cg.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(long long *ptr, long long value) { asm ("st.global.cg.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cg.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short2 *ptr, short2 value) { asm ("st.global.cg.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(short4 *ptr, short4 value) { asm ("st.global.cg.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int2 *ptr, int2 value) { asm ("st.global.cg.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(int4 *ptr, int4 value) { asm ("st.global.cg.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(longlong2 *ptr, longlong2 value) { asm ("st.global.cg.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned char *ptr, unsigned char value) { asm ("st.global.cg.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned short *ptr, unsigned short value) { asm ("st.global.cg.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned int *ptr, unsigned int value) { asm ("st.global.cg.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cg.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.cg.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cg.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ushort2 *ptr, ushort2 value) { asm ("st.global.cg.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ushort4 *ptr, ushort4 value) { asm ("st.global.cg.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uint2 *ptr, uint2 value) { asm ("st.global.cg.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(uint4 *ptr, uint4 value) { asm ("st.global.cg.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cg.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcg(float *ptr, float value) { asm ("st.global.cg.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(double *ptr, double value) { asm ("st.global.cg.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(float2 *ptr, float2 value) { asm ("st.global.cg.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(float4 *ptr, float4 value) { asm ("st.global.cg.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcg(double2 *ptr, double2 value) { asm ("st.global.cg.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stcs * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stcs(long *ptr, long value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long *ptr, unsigned long value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stcs(char *ptr, char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(signed char *ptr, signed char value) { asm ("st.global.cs.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short *ptr, short value) { asm ("st.global.cs.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int *ptr, int value) { asm ("st.global.cs.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(long long *ptr, long long value) { asm ("st.global.cs.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.cs.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short2 *ptr, short2 value) { asm ("st.global.cs.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(short4 *ptr, short4 value) { asm ("st.global.cs.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int2 *ptr, int2 value) { asm ("st.global.cs.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(int4 *ptr, int4 value) { asm ("st.global.cs.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(longlong2 *ptr, longlong2 value) { asm ("st.global.cs.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned char *ptr, unsigned char value) { asm ("st.global.cs.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned short *ptr, unsigned short value) { asm ("st.global.cs.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned int *ptr, unsigned int value) { asm ("st.global.cs.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(unsigned long long *ptr, unsigned long long value) { asm ("st.global.cs.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.cs.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.cs.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ushort2 *ptr, ushort2 value) { asm ("st.global.cs.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ushort4 *ptr, ushort4 value) { asm ("st.global.cs.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uint2 *ptr, uint2 value) { asm ("st.global.cs.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(uint4 *ptr, uint4 value) { asm ("st.global.cs.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.cs.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stcs(float *ptr, float value) { asm ("st.global.cs.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(double *ptr, double value) { asm ("st.global.cs.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(float2 *ptr, float2 value) { asm ("st.global.cs.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(float4 *ptr, float4 value) { asm ("st.global.cs.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stcs(double2 *ptr, double2 value) { asm ("st.global.cs.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +/****************************************************************************** + * __stwt * + ******************************************************************************/ + +// Size of long is architecture and OS specific. 
+#if defined(__LP64__) // 64 bits +__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +#else // 32 bits +__SM_32_INTRINSICS_DECL__ void __stwt(long *ptr, long value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long *ptr, unsigned long value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +#endif + + +__SM_32_INTRINSICS_DECL__ void __stwt(char *ptr, char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(signed char *ptr, signed char value) { asm ("st.global.wt.s8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short *ptr, short value) { asm ("st.global.wt.s16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int *ptr, int value) { asm ("st.global.wt.s32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(long long *ptr, long long value) { asm ("st.global.wt.s64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(char2 *ptr, char2 value) { const int x = value.x, y = value.y; asm ("st.global.wt.v2.s8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(char4 *ptr, char4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.s8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short2 *ptr, short2 value) { asm ("st.global.wt.v2.s16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(short4 *ptr, short4 value) { asm ("st.global.wt.v4.s16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int2 *ptr, int2 value) { asm ("st.global.wt.v2.s32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(int4 *ptr, int4 value) { asm ("st.global.wt.v4.s32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(longlong2 *ptr, longlong2 value) { asm ("st.global.wt.v2.s64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned char *ptr, unsigned char value) { asm ("st.global.wt.u8 [%0], %1;" :: __LDG_PTR (ptr), "r"((int)value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned short *ptr, unsigned short value) { asm ("st.global.wt.u16 [%0], %1;" :: __LDG_PTR (ptr), "h"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned int *ptr, unsigned int value) { asm ("st.global.wt.u32 [%0], %1;" :: __LDG_PTR (ptr), "r"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(unsigned long long *ptr, unsigned long long value) { asm ("st.global.wt.u64 [%0], %1;" :: __LDG_PTR (ptr), "l"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uchar2 *ptr, uchar2 value) { const int x = value.x, y = 
value.y; asm ("st.global.wt.v2.u8 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(x), "r"(y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uchar4 *ptr, uchar4 value) { const int x = value.x, y = value.y, z = value.z, w = value.w; asm ("st.global.wt.v4.u8 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(x), "r"(y), "r"(z), "r"(w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ushort2 *ptr, ushort2 value) { asm ("st.global.wt.v2.u16 [%0], {%1,%2};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ushort4 *ptr, ushort4 value) { asm ("st.global.wt.v4.u16 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "h"(value.x), "h"(value.y), "h"(value.z), "h"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uint2 *ptr, uint2 value) { asm ("st.global.wt.v2.u32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(uint4 *ptr, uint4 value) { asm ("st.global.wt.v4.u32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(ulonglong2 *ptr, ulonglong2 value) { asm ("st.global.wt.v2.u64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "l"(value.x), "l"(value.y) : "memory"); } + +__SM_32_INTRINSICS_DECL__ void __stwt(float *ptr, float value) { asm ("st.global.wt.f32 [%0], %1;" :: __LDG_PTR (ptr), "f"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(double *ptr, double value) { asm ("st.global.wt.f64 [%0], %1;" :: __LDG_PTR (ptr), "d"(value) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(float2 *ptr, float2 value) { asm ("st.global.wt.v2.f32 [%0], {%1,%2};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(float4 *ptr, float4 value) { asm ("st.global.wt.v4.f32 [%0], {%1,%2,%3,%4};" :: __LDG_PTR (ptr), "f"(value.x), "f"(value.y), "f"(value.z), "f"(value.w) : "memory"); } +__SM_32_INTRINSICS_DECL__ void __stwt(double2 *ptr, double2 value) { asm ("st.global.wt.v2.f64 [%0], {%1,%2};" :: __LDG_PTR (ptr), "d"(value.x), "d"(value.y) : "memory"); } + +#undef __LDG_PTR + + +// SHF is the "funnel shift" operation - an accelerated left/right shift with carry +// operating on 64-bit quantities, which are concatenations of two 32-bit registers. + +// This shifts [b:a] left by "shift" bits, returning the most significant bits of the result. +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_l(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_lc(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.l.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} + +// This shifts [b:a] right by "shift" bits, returning the least significant bits of the result. 
+__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_r(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} +__SM_32_INTRINSICS_DECL__ unsigned int __funnelshift_rc(unsigned int lo, unsigned int hi, unsigned int shift) +{ + unsigned int ret; + asm volatile ("shf.r.clamp.b32 %0, %1, %2, %3;" : "=r"(ret) : "r"(lo), "r"(hi), "r"(shift)); + return ret; +} + + +#endif /* _NVHPC_CUDA || !__CUDA_ARCH__ || __CUDA_ARCH__ >= 320 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_32_INTRINSICS_DECL__ + +#endif /* !__SM_32_INTRINSICS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..f2f6da7032f66f3988868ad769d7533d15def5a8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.h @@ -0,0 +1,543 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
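The __funnelshift_* intrinsics added in sm_32_intrinsics.hpp above concatenate two 32-bit registers into one 64-bit quantity before shifting, which makes them a convenient building block for wide shifts. A minimal sketch follows; the helper name shl64_small is illustrative and not part of the header, and it only handles shift amounts below 32:

__device__ unsigned long long shl64_small(unsigned long long v, unsigned int n)
{
    /* Split the 64-bit value into its 32-bit halves. */
    unsigned int lo = (unsigned int)v;
    unsigned int hi = (unsigned int)(v >> 32);
    /* For 0 <= n < 32 the high word of (v << n) is the upper half of the
       funnel-shifted pair [hi:lo]; the low word is an ordinary shift. */
    unsigned int new_hi = __funnelshift_l(lo, hi, n);
    unsigned int new_lo = lo << n;
    return ((unsigned long long)new_hi << 32) | new_lo;
}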
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_H__) +#define __SM_60_ATOMIC_FUNCTIONS_H__ + + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#elif defined(_NVHPC_CUDA) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ extern __device__ __cudart_builtin__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/* Add !defined(_NVHPC_CUDA) to avoid empty function definition in CUDA + * C++ compiler where the macro __CUDA_ARCH__ is not defined. */ +#if !defined(__CUDA_ARCH__) && !defined(_NVHPC_CUDA) +#define __DEF_IF_HOST { } +#else /* !__CUDA_ARCH__ */ +#define __DEF_IF_HOST ; +#endif /* __CUDA_ARCH__ */ + + + +#ifdef __CUDA_ARCH__ +extern "C" +{ +extern __device__ __device_builtin__ double __dAtomicAdd(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAdd_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAdd_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAdd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicAdd_system(float *address, float val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_block(double *address, double val); + +extern __device__ __device_builtin__ +double __dAtomicAdd_system(double *address, double val); + +extern __device__ __device_builtin__ +int __iAtomicExch_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicExch_system(int *address, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicExch_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicExch_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +float __fAtomicExch_block(float *address, float val); + +extern __device__ __device_builtin__ +float __fAtomicExch_system(float *address, float val); + +extern __device__ __device_builtin__ +int __iAtomicMin_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMin_system(int *address, int val); + +extern __device__ 
__device_builtin__ +long long __illAtomicMin_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMin_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMin_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMin_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicMax_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicMax_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __illAtomicMax_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMax_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicMax_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicMax_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicInc_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicDec_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_block(int *address, int compare, int val); + +extern __device__ __device_builtin__ +int __iAtomicCAS_system(int *address, int compare, int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicAnd_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicAnd_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicAnd_system(unsigned int *address, unsigned int val); + +extern __device__ 
__device_builtin__ +unsigned long long __ullAtomicAnd_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicAnd_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicOr_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicOr_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicOr_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicOr_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicOr_system(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +int __iAtomicXor_block(int *address, int val); + +extern __device__ __device_builtin__ +int __iAtomicXor_system(int *address, int val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_block(long long *address, long long val); + +extern __device__ __device_builtin__ +long long __llAtomicXor_system(long long *address, long long val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_block(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned int __uAtomicXor_system(unsigned int *address, unsigned int val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_block(unsigned long long *address, unsigned long long val); + +extern __device__ __device_builtin__ +unsigned long long __ullAtomicXor_system(unsigned long long *address, unsigned long long val); +} +#endif /* __CUDA_ARCH__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) __DEF_IF_HOST + 
+__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, 
unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) __DEF_IF_HOST + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + 
+__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) __DEF_IF_HOST + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ +#undef __DEF_IF_HOST + +#if !defined(__CUDACC_RTC__) && defined(__CUDA_ARCH__) +#include "sm_60_atomic_functions.hpp" +#endif /* !__CUDACC_RTC__ && defined(__CUDA_ARCH__) */ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_H__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..858b373238a4d87f8d5fc669bf4145f4f2a6e877 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/sm_60_atomic_functions.hpp @@ -0,0 +1,527 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(__SM_60_ATOMIC_FUNCTIONS_HPP__) +#define __SM_60_ATOMIC_FUNCTIONS_HPP__ + +#if defined(__CUDACC_RTC__) +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ __device__ +#else /* __CUDACC_RTC__ */ +#define __SM_60_ATOMIC_FUNCTIONS_DECL__ static __inline__ __device__ +#endif /* __CUDACC_RTC__ */ + +#if defined(__cplusplus) && defined(__CUDACC__) + +#if defined(_NVHPC_CUDA) || !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__SM_60_ATOMIC_FUNCTIONS_DECL__ double atomicAdd(double *address, double val) +{ + return __dAtomicAdd(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_block(int *address, int val) +{ + return __iAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAdd_system(int *address, int val) +{ + return __iAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAdd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAdd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_block(float *address, float val) +{ + return __fAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicAdd_system(float *address, float val) +{ + return __fAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_block(double *address, double val) +{ + return __dAtomicAdd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +double atomicAdd_system(double *address, double val) +{ + return __dAtomicAdd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_block(int *address, int val) +{ + return __iAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicSub_system(int *address, int val) +{ + return __iAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_block(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicSub_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAdd_system(address, (unsigned int)-(int)val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_block(int *address, int val) +{ + return __iAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicExch_system(int *address, int val) +{ + return __iAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicExch_block(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicExch_system(unsigned int *address, unsigned int val) +{ + return __uAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicExch_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_block(float *address, float val) +{ + return __fAtomicExch_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +float atomicExch_system(float *address, float val) +{ + return __fAtomicExch_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_block(int *address, int val) +{ + return __iAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMin_system(int *address, int val) +{ + return __iAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_block(long long *address, long long val) +{ + return __illAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMin_system(long long *address, long long val) +{ + return __illAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMin_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMin_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMin_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_block(int *address, int val) +{ + return __iAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicMax_system(int *address, int val) +{ + return __iAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_block(long long *address, long long val) +{ + return __illAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicMax_system(long long *address, long long val) +{ + return __illAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_block(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicMax_system(unsigned int *address, unsigned int val) +{ + return __uAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicMax_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicMax_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_block(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicInc_system(unsigned int *address, unsigned int val) +{ + return __uAtomicInc_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int 
atomicDec_block(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicDec_system(unsigned int *address, unsigned int val) +{ + return __uAtomicDec_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_block(int *address, int compare, int val) +{ + return __iAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicCAS_system(int *address, int compare, int val) +{ + return __iAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_block(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicCAS_system(unsigned int *address, unsigned int compare, + unsigned int val) +{ + return __uAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_block(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_block(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long int atomicCAS_system(unsigned long long int *address, + unsigned long long int compare, + unsigned long long int val) +{ + return __ullAtomicCAS_system(address, compare, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_block(int *address, int val) +{ + return __iAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicAnd_system(int *address, int val) +{ + return __iAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_block(long long *address, long long val) +{ + return __llAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicAnd_system(long long *address, long long val) +{ + return __llAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_block(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicAnd_system(unsigned int *address, unsigned int val) +{ + return __uAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicAnd_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicAnd_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_block(int *address, int val) +{ + return __iAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicOr_system(int *address, int val) +{ + return __iAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_block(long long *address, long long val) +{ + return __llAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicOr_system(long long *address, long long val) +{ + return __llAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_block(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicOr_system(unsigned int *address, unsigned int val) +{ + return __uAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long 
atomicOr_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicOr_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicOr_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_block(int *address, int val) +{ + return __iAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +int atomicXor_system(int *address, int val) +{ + return __iAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_block(long long *address, long long val) +{ + return __llAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +long long atomicXor_system(long long *address, long long val) +{ + return __llAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_block(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned int atomicXor_system(unsigned int *address, unsigned int val) +{ + return __uAtomicXor_system(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_block(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_block(address, val); +} + +__SM_60_ATOMIC_FUNCTIONS_DECL__ +unsigned long long atomicXor_system(unsigned long long *address, unsigned long long val) +{ + return __ullAtomicXor_system(address, val); +} + +#endif /* !__CUDA_ARCH__ || __CUDA_ARCH__ >= 600 */ + +#endif /* __cplusplus && __CUDACC__ */ + +#undef __SM_60_ATOMIC_FUNCTIONS_DECL__ + +#endif /* !__SM_60_ATOMIC_FUNCTIONS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h new file mode 100644 index 0000000000000000000000000000000000000000..5289dc6bbcab9f08bfad11e0a3dde1acca6adde4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/texture_types.h @@ -0,0 +1,177 @@ +/* + * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
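The _block and _system suffixes implemented above select the scope of the atomic: atomicAdd_block is only ordered with respect to threads in the same thread block, plain atomicAdd covers the whole device, and atomicAdd_system extends to the host and peer GPUs. A hedged sketch, assuming an sm_60+ build and that progress points to host-visible (managed or mapped pinned) memory; the kernel name and buffers are illustrative, not part of the header:

__global__ void accumulate(const double *in, int n, double *sum, int *progress)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        atomicAdd(sum, in[i]);          /* FP64 atomicAdd, available from sm_60 */
        atomicAdd_system(progress, 1);  /* system scope: also ordered w.r.t. the CPU */
    }
}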
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__TEXTURE_TYPES_H__) +#define __TEXTURE_TYPES_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "driver_types.h" + +/** + * \addtogroup CUDART_TYPES + * + * @{ + */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#define cudaTextureType1D 0x01 +#define cudaTextureType2D 0x02 +#define cudaTextureType3D 0x03 +#define cudaTextureTypeCubemap 0x0C +#define cudaTextureType1DLayered 0xF1 +#define cudaTextureType2DLayered 0xF2 +#define cudaTextureTypeCubemapLayered 0xFC + +/** + * CUDA texture address modes + */ +enum __device_builtin__ cudaTextureAddressMode +{ + cudaAddressModeWrap = 0, /**< Wrapping address mode */ + cudaAddressModeClamp = 1, /**< Clamp to edge address mode */ + cudaAddressModeMirror = 2, /**< Mirror address mode */ + cudaAddressModeBorder = 3 /**< Border address mode */ +}; + +/** + * CUDA texture filter modes + */ +enum __device_builtin__ cudaTextureFilterMode +{ + cudaFilterModePoint = 0, /**< Point filter mode */ + cudaFilterModeLinear = 1 /**< Linear filter mode */ +}; + +/** + * CUDA texture read modes + */ +enum __device_builtin__ cudaTextureReadMode +{ + cudaReadModeElementType = 0, /**< Read texture as specified element type */ + cudaReadModeNormalizedFloat = 1 /**< Read texture as normalized float */ +}; + +/** + * CUDA texture descriptor + */ +struct __device_builtin__ cudaTextureDesc +{ + /** + * Texture address mode for up to 3 dimensions + */ + enum cudaTextureAddressMode addressMode[3]; + /** + * Texture filter mode + */ + enum cudaTextureFilterMode filterMode; + /** + * Texture read mode + */ + enum cudaTextureReadMode readMode; + /** + * Perform sRGB->linear conversion during texture read + */ + int sRGB; + /** + * Texture Border Color + */ + float borderColor[4]; + /** + * Indicates whether texture reads are normalized or not + */ + int normalizedCoords; + /** + * Limit to the anisotropy ratio + */ + unsigned int maxAnisotropy; + /** + * Mipmap filter mode + */ + enum cudaTextureFilterMode mipmapFilterMode; + /** + * Offset 
applied to the supplied mipmap level + */ + float mipmapLevelBias; + /** + * Lower end of the mipmap level range to clamp access to + */ + float minMipmapLevelClamp; + /** + * Upper end of the mipmap level range to clamp access to + */ + float maxMipmapLevelClamp; + /** + * Disable any trilinear filtering optimizations. + */ + int disableTrilinearOptimization; + /** + * Enable seamless cube map filtering. + */ + int seamlessCubemap; +}; + +/** + * An opaque value that represents a CUDA texture object + */ +typedef __device_builtin__ unsigned long long cudaTextureObject_t; + +/** @} */ +/** @} */ /* END CUDART_TYPES */ + +#endif /* !__TEXTURE_TYPES_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..bee6cd32c36d94bde65aad1c867352493d07a0dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.h @@ -0,0 +1,175 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. 
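The cudaTextureDesc defined above is consumed by cudaCreateTextureObject together with a cudaResourceDesc from driver_types.h. A host-side sketch, assuming a linear float buffer already resides in device memory and the code is compiled as C++ with nvcc; the helper name is illustrative and error handling is omitted:

#include <cuda_runtime.h>
#include <cstring>

/* Hypothetical helper: wrap a linear device buffer of n floats in a texture object. */
static cudaTextureObject_t make_linear_float_texture(float *d_buf, size_t n)
{
    cudaResourceDesc resDesc;
    std::memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_buf;
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = n * sizeof(float);

    cudaTextureDesc texDesc;
    std::memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0]   = cudaAddressModeClamp;    /* out-of-range fetches clamp to the edge */
    texDesc.filterMode       = cudaFilterModePoint;     /* no interpolation */
    texDesc.readMode         = cudaReadModeElementType; /* return the stored element type */
    texDesc.normalizedCoords = 0;                       /* index with integer coordinates */

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;  /* kernels can then read it with tex1Dfetch<float>(tex, i) */
}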
+ * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_FUNCTIONS_H__) +#define __VECTOR_FUNCTIONS_H__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x); + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x); + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y); + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y); + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z); + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z); + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w); + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w); + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x); + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x); + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y); + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y); + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short x,short y, short z); + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z); + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w); + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w); + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x); + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x); + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y); + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y); + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z); + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z); + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w); + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w); + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x); + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x); + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y); + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y); + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z); + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z); + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w); + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long 
int y, unsigned long int z, unsigned long int w); + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x); + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y); + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z); + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w); + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x); + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x); + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y); + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 make_ulonglong2(unsigned long long int x, unsigned long long int y); + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z); + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z); + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w); + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w); + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x); + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y); + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z); + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w); + +#undef __VECTOR_FUNCTIONS_DECL__ + +#if !defined(__CUDACC_RTC__) +#include "vector_functions.hpp" +#endif /* !__CUDACC_RTC__ */ + +#endif /* !__VECTOR_FUNCTIONS_H__ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ab69cf38045a7c44dae67e7149d49ac4c6148747 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cuda_runtime/include/vector_functions.hpp @@ -0,0 +1,316 @@ +/* + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(__VECTOR_FUNCTIONS_HPP__) +#define __VECTOR_FUNCTIONS_HPP__ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +#include "cuda_runtime_api.h" + +#if defined(__CUDACC_RTC__) +#define __VECTOR_FUNCTIONS_DECL__ __host__ __device__ +#else /* !__CUDACC_RTC__ */ +#define __VECTOR_FUNCTIONS_DECL__ static __inline__ __host__ __device__ +#endif /* __CUDACC_RTC__ */ + +/******************************************************************************* +* * +* * +* * +*******************************************************************************/ + +__VECTOR_FUNCTIONS_DECL__ char1 make_char1(signed char x) +{ + char1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar1 make_uchar1(unsigned char x) +{ + uchar1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char2 make_char2(signed char x, signed char y) +{ + char2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar2 make_uchar2(unsigned char x, unsigned char y) +{ + uchar2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char3 make_char3(signed char x, signed char y, signed char z) +{ + char3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar3 make_uchar3(unsigned char x, unsigned char y, unsigned char z) +{ + uchar3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ char4 make_char4(signed char x, signed char y, signed char z, signed char w) +{ + char4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uchar4 make_uchar4(unsigned char x, unsigned char y, unsigned char z, unsigned char w) +{ + uchar4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short1 make_short1(short x) +{ + short1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort1 make_ushort1(unsigned short x) +{ + ushort1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short2 make_short2(short x, short y) +{ + short2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort2 make_ushort2(unsigned short x, unsigned short y) +{ + ushort2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short3 make_short3(short 
x,short y, short z) +{ + short3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort3 make_ushort3(unsigned short x, unsigned short y, unsigned short z) +{ + ushort3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ short4 make_short4(short x, short y, short z, short w) +{ + short4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ushort4 make_ushort4(unsigned short x, unsigned short y, unsigned short z, unsigned short w) +{ + ushort4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int1 make_int1(int x) +{ + int1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint1 make_uint1(unsigned int x) +{ + uint1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int2 make_int2(int x, int y) +{ + int2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint2 make_uint2(unsigned int x, unsigned int y) +{ + uint2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int3 make_int3(int x, int y, int z) +{ + int3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint3 make_uint3(unsigned int x, unsigned int y, unsigned int z) +{ + uint3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ int4 make_int4(int x, int y, int z, int w) +{ + int4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ uint4 make_uint4(unsigned int x, unsigned int y, unsigned int z, unsigned int w) +{ + uint4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long1 make_long1(long int x) +{ + long1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong1 make_ulong1(unsigned long int x) +{ + ulong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long2 make_long2(long int x, long int y) +{ + long2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong2 make_ulong2(unsigned long int x, unsigned long int y) +{ + ulong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long3 make_long3(long int x, long int y, long int z) +{ + long3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong3 make_ulong3(unsigned long int x, unsigned long int y, unsigned long int z) +{ + ulong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ long4 make_long4(long int x, long int y, long int z, long int w) +{ + long4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulong4 make_ulong4(unsigned long int x, unsigned long int y, unsigned long int z, unsigned long int w) +{ + ulong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float1 make_float1(float x) +{ + float1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float2 make_float2(float x, float y) +{ + float2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float3 make_float3(float x, float y, float z) +{ + float3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ float4 make_float4(float x, float y, float z, float w) +{ + float4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong1 make_longlong1(long long int x) +{ + longlong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong1 make_ulonglong1(unsigned long long int x) +{ + ulonglong1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong2 make_longlong2(long long int x, long long int y) +{ + longlong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong2 
make_ulonglong2(unsigned long long int x, unsigned long long int y) +{ + ulonglong2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong3 make_longlong3(long long int x, long long int y, long long int z) +{ + longlong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong3 make_ulonglong3(unsigned long long int x, unsigned long long int y, unsigned long long int z) +{ + ulonglong3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ longlong4 make_longlong4(long long int x, long long int y, long long int z, long long int w) +{ + longlong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ ulonglong4 make_ulonglong4(unsigned long long int x, unsigned long long int y, unsigned long long int z, unsigned long long int w) +{ + ulonglong4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double1 make_double1(double x) +{ + double1 t; t.x = x; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double2 make_double2(double x, double y) +{ + double2 t; t.x = x; t.y = y; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double3 make_double3(double x, double y, double z) +{ + double3 t; t.x = x; t.y = y; t.z = z; return t; +} + +__VECTOR_FUNCTIONS_DECL__ double4 make_double4(double x, double y, double z, double w) +{ + double4 t; t.x = x; t.y = y; t.z = z; t.w = w; return t; +} + +#undef __VECTOR_FUNCTIONS_DECL__ + +#endif /* !__VECTOR_FUNCTIONS_HPP__ */ + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a337a3e7d49ad26253511a56a436e0eb6f3878a1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dcfa4886383e282adeb59c2b034bb55ef251aa3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h new file mode 100644 index 0000000000000000000000000000000000000000..1fcf41a697cb5e6bee6d3697d54a2fe0eafdc168 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn.h @@ -0,0 +1,78 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn : Neural Networks Library + +*/ + +#if !defined(CUDNN_H_) +#define CUDNN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_adv_infer.h" +#include "cudnn_adv_train.h" +#include "cudnn_cnn_infer.h" +#include "cudnn_cnn_train.h" + +#include "cudnn_backend.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h new file mode 100644 index 0000000000000000000000000000000000000000..3c8ddabbf3325e2eafce645da039f91226d93237 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer.h @@ -0,0 +1,658 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv_infer : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_INFER_H_) +#define CUDNN_ADV_INFER_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_INFER_MAJOR 8 +#define CUDNN_ADV_INFER_MINOR 9 +#define CUDNN_ADV_INFER_PATCH 2 + +#if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV INFER!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* BASIC RNN API */ + +typedef enum { + CUDNN_FWD_MODE_INFERENCE = 0, + CUDNN_FWD_MODE_TRAINING = 1, +} cudnnForwardMode_t; + +typedef enum { + CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */ + CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */ + CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */ + CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */ +} cudnnRNNMode_t; + +typedef enum { + CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */ + CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */ + CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */ + CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */ +} cudnnRNNBiasMode_t; + +typedef enum { + CUDNN_UNIDIRECTIONAL = 0, /* single direction network */ + CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */ +} cudnnDirectionMode_t; + +typedef enum { + CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */ + CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */ +} cudnnRNNInputMode_t; + +typedef enum { + CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */ + CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */ +} cudnnRNNClipMode_t; + +typedef enum { + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */ + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */ + CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */ +} cudnnRNNDataLayout_t; + +/* Legacy type for backward compatibility */ +typedef unsigned cudnnRNNPaddingMode_t; + +/* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */ +#define CUDNN_RNN_PADDED_IO_DISABLED 0 +#define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0) + +struct cudnnRNNStruct; +typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t; + +struct cudnnPersistentRNNPlan; +typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t; + +struct cudnnRNNDataStruct; +typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t algo, + cudnnRNNMode_t cellMode, + cudnnRNNBiasMode_t biasMode, + cudnnDirectionMode_t dirMode, + cudnnRNNInputMode_t inputMode, + cudnnDataType_t dataType, + cudnnDataType_t mathPrec, + cudnnMathType_t mathType, + int32_t inputSize, + int32_t hiddenSize, + int32_t projSize, + int32_t numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + uint32_t auxFlags); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t *algo, + cudnnRNNMode_t *cellMode, + cudnnRNNBiasMode_t *biasMode, + cudnnDirectionMode_t *dirMode, + cudnnRNNInputMode_t *inputMode, + cudnnDataType_t *dataType, + cudnnDataType_t *mathPrec, + cudnnMathType_t *mathType, + int32_t *inputSize, + int32_t *hiddenSize, + int32_t *projSize, + int32_t *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + uint32_t *auxFlags); + +/* + * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision + * compute precision is 
further modified by cudnnSetRNNMatrixMathType() + * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage + * dropout is between RNN layers, not between recurrent steps + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v6(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int hiddenSize, + const int numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + cudnnRNNInputMode_t inputMode, + cudnnDirectionMode_t direction, + cudnnRNNMode_t cellMode, + cudnnRNNAlgo_t algo, + cudnnDataType_t mathPrec); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v6(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int *hiddenSize, + int *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + cudnnRNNInputMode_t *inputMode, + cudnnDirectionMode_t *direction, + cudnnRNNMode_t *cellMode, + cudnnRNNAlgo_t *algo, + cudnnDataType_t *mathPrec); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNProjectionLayers(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int recProjSize, + const int outProjSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNProjectionLayers(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + int *recProjSize, + int *outProjSize); + +/* Expensive. Creates the plan for the specific settings. 
*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, + const int minibatch, + const cudnnDataType_t dataType, + cudnnPersistentRNNPlan_t *plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan); + +cudnnStatus_t CUDNNWINAPI +cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch); + +/* dataType in weight descriptors and input descriptors is used to describe storage */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWorkspaceSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + cudnnRNNDataDescriptor_t xDesc, + size_t *workSpaceSize, + size_t *reserveSpaceSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNParamsSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + cudnnDataType_t dataType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int pseudoLayer, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const int linLayerID, + cudnnFilterDescriptor_t linLayerMatDesc, + void **linLayerMat); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int pseudoLayer, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const int linLayerID, + cudnnFilterDescriptor_t linLayerBiasDesc, + void **linLayerBias); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightParams(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int32_t pseudoLayer, + size_t weightSpaceSize, + const void *weightSpace, + int32_t linLayerID, + cudnnTensorDescriptor_t mDesc, + void **mAddr, + cudnnTensorDescriptor_t bDesc, + void **bAddr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardInference(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + void *workSpace, + size_t workSpaceSizeInBytes); + +/* RNN EX API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode); + +cudnnStatus_t 
CUDNNWINAPI +cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t dataType, + cudnnRNNDataLayout_t layout, + int maxSeqLength, + int batchSize, + int vectorSize, + const int seqLengthArray[], /* length of each sequence in the batch */ + void *paddingFill); /* symbol for filling padding position in output */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t *dataType, + cudnnRNNDataLayout_t *layout, + int *maxSeqLength, + int *batchSize, + int *vectorSize, + int arrayLengthRequested, + int seqLengthArray[], + void *paddingFill); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardInferenceEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnRNNDataDescriptor_t yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */ + const void *keys, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */ + void *cAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */ + void *iAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */ + void *queries, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNForward(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnRNNDataDescriptor_t yDesc, + void *y, + cudnnTensorDescriptor_t hDesc, + const void *hx, + void *hy, + cudnnTensorDescriptor_t cDesc, + const void *cx, + void *cy, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* RNN FIND API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes); + +/* Sequence data descriptor */ + +typedef enum { 
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */ + CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */ + CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */ + CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */ +} cudnnSeqDataAxis_t; + +struct cudnnSeqDataStruct; +typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t; + +#define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */ + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const cudnnSeqDataAxis_t axes[], + size_t seqLengthArraySize, + const int seqLengthArray[], + void *paddingFill); + +cudnnStatus_t CUDNNWINAPI +cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t *dataType, + int *nbDims, + int nbDimsRequested, + int dimA[], + cudnnSeqDataAxis_t axes[], + size_t *seqLengthArraySize, + size_t seqLengthSizeRequested, + int seqLengthArray[], + void *paddingFill); + +/* Multihead Attention */ + +/* Legacy type for backward compatibility */ +typedef unsigned cudnnAttnQueryMap_t; + +/* + * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor(). + * Use the bitwise OR operator to combine several settings listed below. Additional + * minor options can be added here w/o changing or introducing new API functions. + */ +#define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */ +#define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */ +#define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */ +#define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */ + +struct cudnnAttnStruct; +typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned attnMode, + int nHeads, + double smScaler, + cudnnDataType_t dataType, + cudnnDataType_t computePrec, + cudnnMathType_t mathType, + cudnnDropoutDescriptor_t attnDropoutDesc, + cudnnDropoutDescriptor_t postDropoutDesc, + int qSize, + int kSize, + int vSize, + int qProjSize, + int kProjSize, + int vProjSize, + int oProjSize, + int qoMaxSeqLength, + int kvMaxSeqLength, + int maxBatchSize, + int maxBeamSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned *attnMode, + int *nHeads, + double *smScaler, + cudnnDataType_t *dataType, + cudnnDataType_t *computePrec, + cudnnMathType_t *mathType, + cudnnDropoutDescriptor_t *attnDropoutDesc, + cudnnDropoutDescriptor_t *postDropoutDesc, + int *qSize, + int *kSize, + int *vSize, + int *qProjSize, + int *kProjSize, + int *vProjSize, + int *oProjSize, + int *qoMaxSeqLength, + int *kvMaxSeqLength, + int *maxBatchSize, + int *maxBeamSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + size_t *weightSizeInBytes, + size_t *workSpaceSizeInBytes, + size_t *reserveSpaceSizeInBytes); + +typedef enum { + CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection 
weights for 'queries' */ + CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */ + CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */ + CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */ + CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */ + CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */ + CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */ + CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */ +} cudnnMultiHeadAttnWeightKind_t; + +#define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnMultiHeadAttnWeightKind_t wKind, + size_t weightSizeInBytes, + const void *weights, + cudnnTensorDescriptor_t wDesc, + void **wAddr); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnForward(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + int currIdx, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsQO[], + const int devSeqLengthsKV[], + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const void *residuals, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t oDesc, + void *out, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..3c8ddabbf3325e2eafce645da039f91226d93237 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_infer_v8.h @@ -0,0 +1,658 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv_infer : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_INFER_H_) +#define CUDNN_ADV_INFER_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_INFER_MAJOR 8 +#define CUDNN_ADV_INFER_MINOR 9 +#define CUDNN_ADV_INFER_PATCH 2 + +#if (CUDNN_ADV_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_ADV_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV INFER!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* BASIC RNN API */ + +typedef enum { + CUDNN_FWD_MODE_INFERENCE = 0, + CUDNN_FWD_MODE_TRAINING = 1, +} cudnnForwardMode_t; + +typedef enum { + CUDNN_RNN_RELU = 0, /* basic RNN cell type with ReLu activation */ + CUDNN_RNN_TANH = 1, /* basic RNN cell type with tanh activation */ + CUDNN_LSTM = 2, /* LSTM with optional recurrent projection and clipping */ + CUDNN_GRU = 3, /* Using h' = tanh(r * Uh(t-1) + Wx) and h = (1 - z) * h' + z * h(t-1); */ +} cudnnRNNMode_t; + +typedef enum { + CUDNN_RNN_NO_BIAS = 0, /* rnn cell formulas do not use biases */ + CUDNN_RNN_SINGLE_INP_BIAS = 1, /* rnn cell formulas use one input bias in input GEMM */ + CUDNN_RNN_DOUBLE_BIAS = 2, /* default, rnn cell formulas use two bias vectors */ + CUDNN_RNN_SINGLE_REC_BIAS = 3 /* rnn cell formulas use one recurrent bias in recurrent GEMM */ +} cudnnRNNBiasMode_t; + +typedef enum { + CUDNN_UNIDIRECTIONAL = 0, /* single direction network */ + CUDNN_BIDIRECTIONAL = 1, /* output concatination at each layer */ +} cudnnDirectionMode_t; + +typedef enum { + CUDNN_LINEAR_INPUT = 0, /* adjustable weight matrix in first layer input GEMM */ + CUDNN_SKIP_INPUT = 1, /* fixed identity matrix in the first layer input GEMM */ +} cudnnRNNInputMode_t; + +typedef enum { + CUDNN_RNN_CLIP_NONE = 0, /* disables LSTM cell clipping */ + CUDNN_RNN_CLIP_MINMAX = 1, /* enables LSTM cell clipping */ +} cudnnRNNClipMode_t; + +typedef enum { + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED = 0, /* padded, outer stride from one time-step to the next */ + CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_PACKED = 1, /* sequence length sorted and packed as in basic RNN api */ + CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED = 2, /* padded, outer stride from one batch to the next */ +} cudnnRNNDataLayout_t; + +/* Legacy type for backward compatibility */ +typedef unsigned cudnnRNNPaddingMode_t; + +/* For auxFlags in cudnnSetRNNDescriptor_v8() and cudnnSetRNNPaddingMode() */ +#define CUDNN_RNN_PADDED_IO_DISABLED 0 +#define CUDNN_RNN_PADDED_IO_ENABLED (1U << 0) + +struct cudnnRNNStruct; +typedef struct cudnnRNNStruct *cudnnRNNDescriptor_t; + +struct cudnnPersistentRNNPlan; +typedef struct cudnnPersistentRNNPlan *cudnnPersistentRNNPlan_t; + +struct cudnnRNNDataStruct; +typedef struct cudnnRNNDataStruct *cudnnRNNDataDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateRNNDescriptor(cudnnRNNDescriptor_t *rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDescriptor(cudnnRNNDescriptor_t rnnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t algo, + cudnnRNNMode_t cellMode, + cudnnRNNBiasMode_t biasMode, + cudnnDirectionMode_t dirMode, + cudnnRNNInputMode_t inputMode, + cudnnDataType_t dataType, + cudnnDataType_t mathPrec, + cudnnMathType_t mathType, + int32_t inputSize, + int32_t hiddenSize, + int32_t projSize, + int32_t numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + uint32_t auxFlags); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNAlgo_t *algo, + cudnnRNNMode_t *cellMode, + cudnnRNNBiasMode_t *biasMode, + cudnnDirectionMode_t *dirMode, + cudnnRNNInputMode_t *inputMode, + cudnnDataType_t *dataType, + cudnnDataType_t *mathPrec, + cudnnMathType_t *mathType, + int32_t *inputSize, + int32_t *hiddenSize, + int32_t *projSize, + int32_t *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + uint32_t *auxFlags); + +/* + * mathPrec in cudnnSetRNNDescriptor_v6() specifies compute precision + * compute precision is 
further modified by cudnnSetRNNMatrixMathType() + * dataType in cudnnGetRNNParamsSize() and wDesc specify weight storage + * dropout is between RNN layers, not between recurrent steps + */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDescriptor_v6(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int hiddenSize, + const int numLayers, + cudnnDropoutDescriptor_t dropoutDesc, + cudnnRNNInputMode_t inputMode, + cudnnDirectionMode_t direction, + cudnnRNNMode_t cellMode, + cudnnRNNAlgo_t algo, + cudnnDataType_t mathPrec); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDescriptor_v6(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int *hiddenSize, + int *numLayers, + cudnnDropoutDescriptor_t *dropoutDesc, + cudnnRNNInputMode_t *inputMode, + cudnnDirectionMode_t *direction, + cudnnRNNMode_t *cellMode, + cudnnRNNAlgo_t *algo, + cudnnDataType_t *mathPrec); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t mType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNMatrixMathType(cudnnRNNDescriptor_t rnnDesc, cudnnMathType_t *mType); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t biasMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBiasMode(cudnnRNNDescriptor_t rnnDesc, cudnnRNNBiasMode_t *biasMode); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip_v8(cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNSetClip(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t clipMode, + cudnnNanPropagation_t clipNanOpt, + double lclip, + double rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNGetClip(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnRNNClipMode_t *clipMode, + cudnnNanPropagation_t *clipNanOpt, + double *lclip, + double *rclip); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNProjectionLayers(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int recProjSize, + const int outProjSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNProjectionLayers(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + int *recProjSize, + int *outProjSize); + +/* Expensive. Creates the plan for the specific settings. 
*/ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreatePersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, + const int minibatch, + const cudnnDataType_t dataType, + cudnnPersistentRNNPlan_t *plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyPersistentRNNPlan(cudnnPersistentRNNPlan_t plan); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetPersistentRNNPlan(cudnnRNNDescriptor_t rnnDesc, cudnnPersistentRNNPlan_t plan); + +cudnnStatus_t CUDNNWINAPI +cudnnBuildRNNDynamic(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, int miniBatch); + +/* dataType in weight descriptors and input descriptors is used to describe storage */ +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWorkspaceSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + size_t *sizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTrainingReserveSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNTempSpaceSizes(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + cudnnRNNDataDescriptor_t xDesc, + size_t *workSpaceSize, + size_t *reserveSpaceSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNParamsSize(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + cudnnDataType_t dataType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightSpaceSize(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, size_t *weightSpaceSize); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNLinLayerMatrixParams(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int pseudoLayer, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const int linLayerID, + cudnnFilterDescriptor_t linLayerMatDesc, + void **linLayerMat); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNLinLayerBiasParams(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int pseudoLayer, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const int linLayerID, + cudnnFilterDescriptor_t linLayerBiasDesc, + void **linLayerBias); + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNWeightParams(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + int32_t pseudoLayer, + size_t weightSpaceSize, + const void *weightSpace, + int32_t linLayerID, + cudnnTensorDescriptor_t mDesc, + void **mAddr, + cudnnTensorDescriptor_t bDesc, + void **bAddr); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardInference(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + void *workSpace, + size_t workSpaceSizeInBytes); + +/* RNN EX API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned paddingMode); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNPaddingMode(cudnnRNNDescriptor_t rnnDesc, unsigned *paddingMode); + +cudnnStatus_t 
CUDNNWINAPI +cudnnCreateRNNDataDescriptor(cudnnRNNDataDescriptor_t *rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t dataType, + cudnnRNNDataLayout_t layout, + int maxSeqLength, + int batchSize, + int vectorSize, + const int seqLengthArray[], /* length of each sequence in the batch */ + void *paddingFill); /* symbol for filling padding position in output */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetRNNDataDescriptor(cudnnRNNDataDescriptor_t rnnDataDesc, + cudnnDataType_t *dataType, + cudnnRNNDataLayout_t *layout, + int *maxSeqLength, + int *batchSize, + int *vectorSize, + int arrayLengthRequested, + int seqLengthArray[], + void *paddingFill); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardInferenceEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnRNNDataDescriptor_t yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */ + const void *keys, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */ + void *cAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */ + void *iAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */ + void *queries, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNForward(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnForwardMode_t fwdMode, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnRNNDataDescriptor_t yDesc, + void *y, + cudnnTensorDescriptor_t hDesc, + const void *hx, + void *hy, + cudnnTensorDescriptor_t cDesc, + const void *cx, + void *cy, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* RNN FIND API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetRNNAlgorithmDescriptor(cudnnHandle_t handle, cudnnRNNDescriptor_t rnnDesc, cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNForwardInferenceAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNForwardInferenceAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes); + +/* Sequence data descriptor */ + +typedef enum { 
+ CUDNN_SEQDATA_TIME_DIM = 0, /* index in time */ + CUDNN_SEQDATA_BATCH_DIM = 1, /* index in batch */ + CUDNN_SEQDATA_BEAM_DIM = 2, /* index in beam */ + CUDNN_SEQDATA_VECT_DIM = 3 /* index in vector */ +} cudnnSeqDataAxis_t; + +struct cudnnSeqDataStruct; +typedef struct cudnnSeqDataStruct *cudnnSeqDataDescriptor_t; + +#define CUDNN_SEQDATA_DIM_COUNT 4 /* dimension count */ + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSeqDataDescriptor(cudnnSeqDataDescriptor_t *seqDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSeqDataDescriptor(cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const cudnnSeqDataAxis_t axes[], + size_t seqLengthArraySize, + const int seqLengthArray[], + void *paddingFill); + +cudnnStatus_t CUDNNWINAPI +cudnnGetSeqDataDescriptor(const cudnnSeqDataDescriptor_t seqDataDesc, + cudnnDataType_t *dataType, + int *nbDims, + int nbDimsRequested, + int dimA[], + cudnnSeqDataAxis_t axes[], + size_t *seqLengthArraySize, + size_t seqLengthSizeRequested, + int seqLengthArray[], + void *paddingFill); + +/* Multihead Attention */ + +/* Legacy type for backward compatibility */ +typedef unsigned cudnnAttnQueryMap_t; + +/* + * Multi-head attention options passed via 'attnMode' in cudnnSetAttnDescriptor(). + * Use the bitwise OR operator to combine several settings listed below. Additional + * minor options can be added here w/o changing or introducing new API functions. + */ +#define CUDNN_ATTN_QUERYMAP_ALL_TO_ONE 0 /* multiple Q-s map to a single (K,V) set when beam size > 1 */ +#define CUDNN_ATTN_QUERYMAP_ONE_TO_ONE (1U << 0) /* multiple Q-s map to multiple (K,V) sets when beam size > 1 */ +#define CUDNN_ATTN_DISABLE_PROJ_BIASES 0 /* no biases in attention input and output projections */ +#define CUDNN_ATTN_ENABLE_PROJ_BIASES (1U << 1) /* use biases in attention input and output projections */ + +struct cudnnAttnStruct; +typedef struct cudnnAttnStruct *cudnnAttnDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateAttnDescriptor(cudnnAttnDescriptor_t *attnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyAttnDescriptor(cudnnAttnDescriptor_t attnDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned attnMode, + int nHeads, + double smScaler, + cudnnDataType_t dataType, + cudnnDataType_t computePrec, + cudnnMathType_t mathType, + cudnnDropoutDescriptor_t attnDropoutDesc, + cudnnDropoutDescriptor_t postDropoutDesc, + int qSize, + int kSize, + int vSize, + int qProjSize, + int kProjSize, + int vProjSize, + int oProjSize, + int qoMaxSeqLength, + int kvMaxSeqLength, + int maxBatchSize, + int maxBeamSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetAttnDescriptor(cudnnAttnDescriptor_t attnDesc, + unsigned *attnMode, + int *nHeads, + double *smScaler, + cudnnDataType_t *dataType, + cudnnDataType_t *computePrec, + cudnnMathType_t *mathType, + cudnnDropoutDescriptor_t *attnDropoutDesc, + cudnnDropoutDescriptor_t *postDropoutDesc, + int *qSize, + int *kSize, + int *vSize, + int *qProjSize, + int *kProjSize, + int *vProjSize, + int *oProjSize, + int *qoMaxSeqLength, + int *kvMaxSeqLength, + int *maxBatchSize, + int *maxBeamSize); + +cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnBuffers(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + size_t *weightSizeInBytes, + size_t *workSpaceSizeInBytes, + size_t *reserveSpaceSizeInBytes); + +typedef enum { + CUDNN_MH_ATTN_Q_WEIGHTS = 0, /* input projection 
weights for 'queries' */ + CUDNN_MH_ATTN_K_WEIGHTS = 1, /* input projection weights for 'keys' */ + CUDNN_MH_ATTN_V_WEIGHTS = 2, /* input projection weights for 'values' */ + CUDNN_MH_ATTN_O_WEIGHTS = 3, /* output projection weights */ + CUDNN_MH_ATTN_Q_BIASES = 4, /* input projection bias tensor for 'queries' */ + CUDNN_MH_ATTN_K_BIASES = 5, /* input projection bias for 'keys' */ + CUDNN_MH_ATTN_V_BIASES = 6, /* input projection bias for 'values' */ + CUDNN_MH_ATTN_O_BIASES = 7, /* output projection biases */ +} cudnnMultiHeadAttnWeightKind_t; + +#define CUDNN_ATTN_WKIND_COUNT 8 /* Number of attention weight/bias tensors */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetMultiHeadAttnWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnMultiHeadAttnWeightKind_t wKind, + size_t weightSizeInBytes, + const void *weights, + cudnnTensorDescriptor_t wDesc, + void **wAddr); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnForward(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + int currIdx, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsQO[], + const int devSeqLengthsKV[], + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const void *residuals, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t oDesc, + void *out, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h new file mode 100644 index 0000000000000000000000000000000000000000..6879af86b214a69138897ac78968149858b54737 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train.h @@ -0,0 +1,540 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv_train : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_TRAIN_H_) +#define CUDNN_ADV_TRAIN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_adv_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_TRAIN_MAJOR 8 +#define CUDNN_ADV_TRAIN_MINOR 9 +#define CUDNN_ADV_TRAIN_PATCH 2 + +#if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV TRAIN!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef enum { + CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */ + CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */ +} cudnnWgradMode_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardTraining(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const cudnnTensorDescriptor_t *dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnTensorDescriptor_t *dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t yDesc, + const void *y, + const void *dy, + cudnnRNNDataDescriptor_t xDesc, + void *dx, + cudnnTensorDescriptor_t hDesc, + const void *hx, + const void *dhy, + void *dhx, + cudnnTensorDescriptor_t cDesc, + const void *cx, + const void *dcy, + void *dcx, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const void *workSpace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnWgradMode_t addGrad, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnTensorDescriptor_t hDesc, + const void *hx, + cudnnRNNDataDescriptor_t yDesc, + const void *y, + size_t weightSpaceSize, + void *dweightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* RNN EX API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardTrainingEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const 
cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnRNNDataDescriptor_t yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */ + const void *keys, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */ + void *cAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */ + void *iAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */ + void *queries, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardDataEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + const cudnnRNNDataDescriptor_t dyDesc, + const void *dy, + const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */ + const void *dcAttn, /* reserved, should pass NULL */ + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnRNNDataDescriptor_t dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */ + void *dkeys, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeightsEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + void *workSpace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* RNN FIND API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t 
CUDNNWINAPI +cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const cudnnTensorDescriptor_t *dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnTensorDescriptor_t *dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + const void *workspace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsDQDO[], + const int devSeqLengthsDKDV[], + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + const cudnnSeqDataDescriptor_t dqDesc, + void *dqueries, + const void *queries, + const cudnnSeqDataDescriptor_t dkDesc, + void *dkeys, + const void *keys, + const cudnnSeqDataDescriptor_t dvDesc, + void *dvalues, + const void *values, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnWgradMode_t addGrad, + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + size_t weightSizeInBytes, + const void *weights, + void *dweights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* +* CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions +*/ +/* Input normalization mode for loss function */ +typedef enum { + CUDNN_LOSS_NORMALIZATION_NONE = 0, + CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1, +} cudnnLossNormalizationMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType); + +cudnnStatus_t CUDNNWINAPI 
+cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode, + int maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc); + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int hostLabels[], /* labels, in CPU memory */ + const int hostLabelLengths[], /* the length of each label, in CPU memory */ + const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + void *workspace, /* pointer to the workspace, in GPU memory */ + size_t workSpaceSizeInBytes); /* size of the workspace */ + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int labels[], /* labels, in GPU memory */ + const int labelLengths[], /* the length of each label, in GPU memory */ + const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + size_t workSpaceSizeInBytes, /* size of the workspace */ + void *workspace); /* pointer to the workspace, in GPU memory */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, 
the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + const int *labels, /* labels, in CPU memory */ + const int *labelLengths, /* the length of each label, in CPU memory */ + const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_TRAIN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..6879af86b214a69138897ac78968149858b54737 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_adv_train_v8.h @@ -0,0 +1,540 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
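For orientation, the CTC entry points declared above are typically driven in a create / describe / query-workspace / execute sequence. The sketch below is illustrative only (not part of the vendored header): it assumes the caller has already built a cuDNN handle, T,N,A-shaped probsDesc/gradientsDesc tensor descriptors, and the device buffers, and it uses only functions and enums declared in cudnn_adv_train.h above plus cudaMalloc/cudaFree.

#include <cudnn.h>
#include <cuda_runtime.h>

/* Illustrative sketch only -- handle, tensor descriptors, and device buffers
 * are assumed to be created elsewhere by the caller. */
static cudnnStatus_t run_ctc_loss_v8(cudnnHandle_t handle,
                                     cudnnTensorDescriptor_t probsDesc,      /* dims T,N,A */
                                     cudnnTensorDescriptor_t gradientsDesc,  /* dims T,N,A */
                                     const void *probs,        /* softmax output, GPU memory */
                                     const int *labels,        /* GPU memory (v8 API) */
                                     const int *labelLengths,  /* GPU memory */
                                     const int *inputLengths,  /* GPU memory */
                                     void *costs,              /* GPU memory */
                                     void *gradients,          /* GPU memory, or NULL for costs only */
                                     int maxLabelLength)
{
    cudnnCTCLossDescriptor_t ctcDesc;
    cudnnStatus_t st = cudnnCreateCTCLossDescriptor(&ctcDesc);
    if (st != CUDNN_STATUS_SUCCESS) return st;

    /* Describe the loss: FP32 compute, softmax normalization, no NaN propagation. */
    st = cudnnSetCTCLossDescriptor_v8(ctcDesc, CUDNN_DATA_FLOAT,
                                      CUDNN_LOSS_NORMALIZATION_SOFTMAX,
                                      CUDNN_NOT_PROPAGATE_NAN, maxLabelLength);

    /* Query and allocate the workspace for the chosen algorithm. */
    size_t wsSize = 0;
    void *workspace = NULL;
    if (st == CUDNN_STATUS_SUCCESS)
        st = cudnnGetCTCLossWorkspaceSize_v8(handle, CUDNN_CTC_LOSS_ALGO_DETERMINISTIC,
                                             ctcDesc, probsDesc, gradientsDesc, &wsSize);
    if (st == CUDNN_STATUS_SUCCESS && cudaMalloc(&workspace, wsSize) != cudaSuccess)
        st = CUDNN_STATUS_ALLOC_FAILED;

    /* Compute costs and (optionally) gradients. */
    if (st == CUDNN_STATUS_SUCCESS)
        st = cudnnCTCLoss_v8(handle, CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctcDesc,
                             probsDesc, probs, labels, labelLengths, inputLengths,
                             costs, gradientsDesc, gradients, wsSize, workspace);

    cudaFree(workspace);
    cudnnDestroyCTCLossDescriptor(ctcDesc);
    return st;
}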
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* cudnn_adv_train : cuDNN's advanced and experimental features. + +*/ + +#if !defined(CUDNN_ADV_TRAIN_H_) +#define CUDNN_ADV_TRAIN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_adv_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_ADV_TRAIN_MAJOR 8 +#define CUDNN_ADV_TRAIN_MINOR 9 +#define CUDNN_ADV_TRAIN_PATCH 2 + +#if (CUDNN_ADV_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_ADV_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_ADV_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN ADV TRAIN!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef enum { + CUDNN_WGRAD_MODE_ADD = 0, /* add partial gradients to wgrad output buffers */ + CUDNN_WGRAD_MODE_SET = 1, /* write partial gradients to wgrad output buffers */ +} cudnnWgradMode_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardTraining(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const cudnnTensorDescriptor_t *dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnTensorDescriptor_t *dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardData_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t yDesc, + const void *y, + const void *dy, + cudnnRNNDataDescriptor_t xDesc, + void *dx, + cudnnTensorDescriptor_t hDesc, + const void *hx, + const void *dhy, + void *dhx, + cudnnTensorDescriptor_t cDesc, + const void *cx, + const void *dcy, + void *dcx, + size_t weightSpaceSize, + const void *weightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const void *workSpace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeights_v8(cudnnHandle_t handle, + cudnnRNNDescriptor_t rnnDesc, + cudnnWgradMode_t addGrad, + const int32_t devSeqLengths[], + cudnnRNNDataDescriptor_t xDesc, + const void *x, + cudnnTensorDescriptor_t hDesc, + const void *hx, + cudnnRNNDataDescriptor_t yDesc, + const void *y, + size_t weightSpaceSize, + void *dweightSpace, + size_t workSpaceSize, + void *workSpace, + size_t reserveSpaceSize, + void *reserveSpace); + +/* RNN EX API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNForwardTrainingEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const 
cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnRNNDataDescriptor_t yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const cudnnRNNDataDescriptor_t kDesc, /* reserved, should pass NULL */ + const void *keys, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t cDesc, /* reserved, should pass NULL */ + void *cAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t iDesc, /* reserved, should pass NULL */ + void *iAttn, /* reserved, should pass NULL */ + const cudnnRNNDataDescriptor_t qDesc, /* reserved, should pass NULL */ + void *queries, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardDataEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + const cudnnRNNDataDescriptor_t dyDesc, + const void *dy, + const cudnnRNNDataDescriptor_t dcDesc, /* reserved, should pass NULL */ + const void *dcAttn, /* reserved, should pass NULL */ + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnRNNDataDescriptor_t dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const cudnnRNNDataDescriptor_t dkDesc, /* reserved, should pass NULL */ + void *dkeys, /* reserved, should pass NULL */ + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRNNBackwardWeightsEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const cudnnRNNDataDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnRNNDataDescriptor_t yDesc, + const void *y, + void *workSpace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* RNN FIND API */ + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNForwardTrainingAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNForwardTrainingAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t *yDesc, + void *y, + const cudnnTensorDescriptor_t hyDesc, + void *hy, + const cudnnTensorDescriptor_t cyDesc, + void *cy, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t 
CUDNNWINAPI +cudnnFindRNNBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const cudnnTensorDescriptor_t *dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dhyDesc, + const void *dhy, + const cudnnTensorDescriptor_t dcyDesc, + const void *dcy, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t cxDesc, + const void *cx, + const cudnnTensorDescriptor_t *dxDesc, + void *dx, + const cudnnTensorDescriptor_t dhxDesc, + void *dhx, + const cudnnTensorDescriptor_t dcxDesc, + void *dcx, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetRNNBackwardWeightsAlgorithmMaxCount(cudnnHandle_t handle, const cudnnRNNDescriptor_t rnnDesc, int *count); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnFindRNNBackwardWeightsAlgorithmEx(cudnnHandle_t handle, + const cudnnRNNDescriptor_t rnnDesc, + const int seqLength, + const cudnnTensorDescriptor_t *xDesc, + const void *x, + const cudnnTensorDescriptor_t hxDesc, + const void *hx, + const cudnnTensorDescriptor_t *yDesc, + const void *y, + const float findIntensity, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnAlgorithmPerformance_t *perfResults, + const void *workspace, + size_t workSpaceSizeInBytes, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardData(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + const int loWinIdx[], + const int hiWinIdx[], + const int devSeqLengthsDQDO[], + const int devSeqLengthsDKDV[], + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + const cudnnSeqDataDescriptor_t dqDesc, + void *dqueries, + const void *queries, + const cudnnSeqDataDescriptor_t dkDesc, + void *dkeys, + const void *keys, + const cudnnSeqDataDescriptor_t dvDesc, + void *dvalues, + const void *values, + size_t weightSizeInBytes, + const void *weights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +cudnnStatus_t CUDNNWINAPI +cudnnMultiHeadAttnBackwardWeights(cudnnHandle_t handle, + const cudnnAttnDescriptor_t attnDesc, + cudnnWgradMode_t addGrad, + const cudnnSeqDataDescriptor_t qDesc, + const void *queries, + const cudnnSeqDataDescriptor_t kDesc, + const void *keys, + const cudnnSeqDataDescriptor_t vDesc, + const void *values, + const cudnnSeqDataDescriptor_t doDesc, + const void *dout, + size_t weightSizeInBytes, + const void *weights, + void *dweights, + size_t workSpaceSizeInBytes, + void *workSpace, + size_t reserveSpaceSizeInBytes, + void *reserveSpace); + +/* +* CTC (Connectionist Temporal Classification) loss descriptor create/destory/set/get functions +*/ +/* Input normalization mode for loss function */ +typedef enum { + CUDNN_LOSS_NORMALIZATION_NONE = 0, + CUDNN_LOSS_NORMALIZATION_SOFTMAX = 1, +} cudnnLossNormalizationMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateCTCLossDescriptor(cudnnCTCLossDescriptor_t *ctcLossDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t compType); + +cudnnStatus_t CUDNNWINAPI 
+cudnnSetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t compType, + cudnnLossNormalizationMode_t normMode, + cudnnNanPropagation_t gradMode, + int maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc, cudnnDataType_t *compType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptorEx(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossDescriptor_v8(cudnnCTCLossDescriptor_t ctcLossDesc, + cudnnDataType_t *compType, + cudnnLossNormalizationMode_t *normMode, + cudnnNanPropagation_t *gradMode, + int *maxLabelLength); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyCTCLossDescriptor(cudnnCTCLossDescriptor_t ctcLossDesc); + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int hostLabels[], /* labels, in CPU memory */ + const int hostLabelLengths[], /* the length of each label, in CPU memory */ + const int hostInputLengths[], /* the lengths of timing steps in each batch, in CPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + void *workspace, /* pointer to the workspace, in GPU memory */ + size_t workSpaceSizeInBytes); /* size of the workspace */ + +/* return the ctc costs and gradients, given the probabilities and labels */ +cudnnStatus_t CUDNNWINAPI +cudnnCTCLoss_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t + probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the timing steps, N is the + mini batch size, A is the alphabet size) */ + const void *probs, /* probabilities after softmax, in GPU memory */ + const int labels[], /* labels, in GPU memory */ + const int labelLengths[], /* the length of each label, in GPU memory */ + const int inputLengths[], /* the lengths of timing steps in each batch, in GPU memory */ + void *costs, /* the returned costs of CTC, in GPU memory */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the dimensions are T,N,A */ + void *gradients, /* the returned CTC gradients, in GPU memory, to compute costs only, set it to NULL */ + size_t workSpaceSizeInBytes, /* size of the workspace */ + void *workspace); /* pointer to the workspace, in GPU memory */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize( + cudnnHandle_t handle, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, 
the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + const int *labels, /* labels, in CPU memory */ + const int *labelLengths, /* the length of each label, in CPU memory */ + const int *inputLengths, /* the lengths of timing steps in each batch, in CPU memory */ + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* return the workspace size needed for ctc */ +cudnnStatus_t CUDNNWINAPI +cudnnGetCTCLossWorkspaceSize_v8( + cudnnHandle_t handle, + cudnnCTCLossAlgo_t algo, /* algorithm selected, supported now 0 and 1 */ + cudnnCTCLossDescriptor_t ctcLossDesc, + const cudnnTensorDescriptor_t probsDesc, /* Tensor descriptor for probabilities, the dimensions are T,N,A (T is the + timing steps, N is the mini batch size, A is the alphabet size) */ + const cudnnTensorDescriptor_t gradientsDesc, /* Tensor descriptor for gradients, the + dimensions are T,N,A. To compute costs + only, set it to NULL */ + size_t *sizeInBytes); /* pointer to the returned workspace size */ + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnAdvTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_ADV_TRAIN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h new file mode 100644 index 0000000000000000000000000000000000000000..b0f41de3b1e87286037ed7d0351057d93287d88f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend.h @@ -0,0 +1,608 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content in this header file is under development to be included in cudnn.h in the future + * Production code should have all include of this header file remove. + */ + +#include "cudnn_ops_infer.h" +#include "cudnn_cnn_infer.h" + +/* NOTE: definition in extern "C" to be copied later to public header */ +#if defined(__cplusplus) +extern "C" { +#endif + +typedef void *cudnnBackendDescriptor_t; + +typedef struct cudnnFractionStruct { + int64_t numerator; + int64_t denominator; +} cudnnFraction_t; + +typedef enum { + CUDNN_POINTWISE_ADD = 0, + CUDNN_POINTWISE_ADD_SQUARE = 5, + CUDNN_POINTWISE_DIV = 6, + CUDNN_POINTWISE_MAX = 3, + CUDNN_POINTWISE_MIN = 2, + CUDNN_POINTWISE_MOD = 7, + CUDNN_POINTWISE_MUL = 1, + CUDNN_POINTWISE_POW = 8, + CUDNN_POINTWISE_SUB = 9, + + CUDNN_POINTWISE_ABS = 10, + CUDNN_POINTWISE_CEIL = 11, + CUDNN_POINTWISE_COS = 12, + CUDNN_POINTWISE_EXP = 13, + CUDNN_POINTWISE_FLOOR = 14, + CUDNN_POINTWISE_LOG = 15, + CUDNN_POINTWISE_NEG = 16, + CUDNN_POINTWISE_RSQRT = 17, + CUDNN_POINTWISE_SIN = 18, + CUDNN_POINTWISE_SQRT = 4, + CUDNN_POINTWISE_TAN = 19, + CUDNN_POINTWISE_ERF = 20, + CUDNN_POINTWISE_IDENTITY = 21, + CUDNN_POINTWISE_RECIPROCAL = 22, + + CUDNN_POINTWISE_RELU_FWD = 100, + CUDNN_POINTWISE_TANH_FWD = 101, + CUDNN_POINTWISE_SIGMOID_FWD = 102, + CUDNN_POINTWISE_ELU_FWD = 103, + CUDNN_POINTWISE_GELU_FWD = 104, + CUDNN_POINTWISE_SOFTPLUS_FWD = 105, + CUDNN_POINTWISE_SWISH_FWD = 106, + CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107, + + CUDNN_POINTWISE_RELU_BWD = 200, + CUDNN_POINTWISE_TANH_BWD = 201, + CUDNN_POINTWISE_SIGMOID_BWD = 202, + CUDNN_POINTWISE_ELU_BWD = 203, + CUDNN_POINTWISE_GELU_BWD = 204, + CUDNN_POINTWISE_SOFTPLUS_BWD = 205, + CUDNN_POINTWISE_SWISH_BWD = 206, + CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207, + + CUDNN_POINTWISE_CMP_EQ = 300, + CUDNN_POINTWISE_CMP_NEQ = 301, + CUDNN_POINTWISE_CMP_GT = 302, + CUDNN_POINTWISE_CMP_GE = 303, + CUDNN_POINTWISE_CMP_LT = 304, + CUDNN_POINTWISE_CMP_LE = 305, + + CUDNN_POINTWISE_LOGICAL_AND = 400, + CUDNN_POINTWISE_LOGICAL_OR = 401, + CUDNN_POINTWISE_LOGICAL_NOT = 402, + + CUDNN_POINTWISE_GEN_INDEX = 501, + + CUDNN_POINTWISE_BINARY_SELECT = 601, +} cudnnPointwiseMode_t; + +typedef enum { + CUDNN_RESAMPLE_NEAREST = 
0, + CUDNN_RESAMPLE_BILINEAR = 1, + CUDNN_RESAMPLE_AVGPOOL = 2, + CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2, + CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4, + CUDNN_RESAMPLE_MAXPOOL = 3, +} cudnnResampleMode_t; + +typedef enum { + CUDNN_SIGNAL_SET = 0, + CUDNN_SIGNAL_WAIT = 1, +} cudnnSignalMode_t; + +typedef enum { + CUDNN_GENSTATS_SUM_SQSUM = 0, +} cudnnGenStatsMode_t; + +typedef enum { + CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0, + CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1, +} cudnnBnFinalizeStatsMode_t; + +typedef enum { + CUDNN_RNG_DISTRIBUTION_BERNOULLI, + CUDNN_RNG_DISTRIBUTION_UNIFORM, + CUDNN_RNG_DISTRIBUTION_NORMAL, +} cudnnRngDistribution_t; + +typedef enum { + CUDNN_ATTR_POINTWISE_MODE = 0, + CUDNN_ATTR_POINTWISE_MATH_PREC = 1, + CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3, + CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5, + CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6, + CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7, + CUDNN_ATTR_POINTWISE_SWISH_BETA = 8, + CUDNN_ATTR_POINTWISE_AXIS = 9, + + CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100, + CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101, + CUDNN_ATTR_CONVOLUTION_DILATIONS = 102, + CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + 
CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + 
CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + 
CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} 
cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK = 5, + CUDNN_KNOB_TYPE_LDGA = 6, + CUDNN_KNOB_TYPE_LDGB = 7, + CUDNN_KNOB_TYPE_CHUNK_K = 8, + CUDNN_KNOB_TYPE_SPLIT_H = 9, + CUDNN_KNOB_TYPE_WINO_TILE = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE = 18, + CUDNN_KNOB_TYPE_SLICED = 19, + CUDNN_KNOB_TYPE_SPLIT_RS = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER = 21, + CUDNN_KNOB_TYPE_LDGC = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h new file mode 100644 index 
0000000000000000000000000000000000000000..b0f41de3b1e87286037ed7d0351057d93287d88f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_backend_v8.h @@ -0,0 +1,608 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef _CUDNN_BACKEND_H_ +#define _CUDNN_BACKEND_H_ + +/* + * The content in this header file is under development to be included in cudnn.h in the future + * Production code should have all include of this header file remove. 
+ */ + +#include "cudnn_ops_infer.h" +#include "cudnn_cnn_infer.h" + +/* NOTE: definition in extern "C" to be copied later to public header */ +#if defined(__cplusplus) +extern "C" { +#endif + +typedef void *cudnnBackendDescriptor_t; + +typedef struct cudnnFractionStruct { + int64_t numerator; + int64_t denominator; +} cudnnFraction_t; + +typedef enum { + CUDNN_POINTWISE_ADD = 0, + CUDNN_POINTWISE_ADD_SQUARE = 5, + CUDNN_POINTWISE_DIV = 6, + CUDNN_POINTWISE_MAX = 3, + CUDNN_POINTWISE_MIN = 2, + CUDNN_POINTWISE_MOD = 7, + CUDNN_POINTWISE_MUL = 1, + CUDNN_POINTWISE_POW = 8, + CUDNN_POINTWISE_SUB = 9, + + CUDNN_POINTWISE_ABS = 10, + CUDNN_POINTWISE_CEIL = 11, + CUDNN_POINTWISE_COS = 12, + CUDNN_POINTWISE_EXP = 13, + CUDNN_POINTWISE_FLOOR = 14, + CUDNN_POINTWISE_LOG = 15, + CUDNN_POINTWISE_NEG = 16, + CUDNN_POINTWISE_RSQRT = 17, + CUDNN_POINTWISE_SIN = 18, + CUDNN_POINTWISE_SQRT = 4, + CUDNN_POINTWISE_TAN = 19, + CUDNN_POINTWISE_ERF = 20, + CUDNN_POINTWISE_IDENTITY = 21, + CUDNN_POINTWISE_RECIPROCAL = 22, + + CUDNN_POINTWISE_RELU_FWD = 100, + CUDNN_POINTWISE_TANH_FWD = 101, + CUDNN_POINTWISE_SIGMOID_FWD = 102, + CUDNN_POINTWISE_ELU_FWD = 103, + CUDNN_POINTWISE_GELU_FWD = 104, + CUDNN_POINTWISE_SOFTPLUS_FWD = 105, + CUDNN_POINTWISE_SWISH_FWD = 106, + CUDNN_POINTWISE_GELU_APPROX_TANH_FWD = 107, + + CUDNN_POINTWISE_RELU_BWD = 200, + CUDNN_POINTWISE_TANH_BWD = 201, + CUDNN_POINTWISE_SIGMOID_BWD = 202, + CUDNN_POINTWISE_ELU_BWD = 203, + CUDNN_POINTWISE_GELU_BWD = 204, + CUDNN_POINTWISE_SOFTPLUS_BWD = 205, + CUDNN_POINTWISE_SWISH_BWD = 206, + CUDNN_POINTWISE_GELU_APPROX_TANH_BWD = 207, + + CUDNN_POINTWISE_CMP_EQ = 300, + CUDNN_POINTWISE_CMP_NEQ = 301, + CUDNN_POINTWISE_CMP_GT = 302, + CUDNN_POINTWISE_CMP_GE = 303, + CUDNN_POINTWISE_CMP_LT = 304, + CUDNN_POINTWISE_CMP_LE = 305, + + CUDNN_POINTWISE_LOGICAL_AND = 400, + CUDNN_POINTWISE_LOGICAL_OR = 401, + CUDNN_POINTWISE_LOGICAL_NOT = 402, + + CUDNN_POINTWISE_GEN_INDEX = 501, + + CUDNN_POINTWISE_BINARY_SELECT = 601, +} cudnnPointwiseMode_t; + +typedef enum { + CUDNN_RESAMPLE_NEAREST = 0, + CUDNN_RESAMPLE_BILINEAR = 1, + CUDNN_RESAMPLE_AVGPOOL = 2, + CUDNN_RESAMPLE_AVGPOOL_INCLUDE_PADDING = 2, + CUDNN_RESAMPLE_AVGPOOL_EXCLUDE_PADDING = 4, + CUDNN_RESAMPLE_MAXPOOL = 3, +} cudnnResampleMode_t; + +typedef enum { + CUDNN_SIGNAL_SET = 0, + CUDNN_SIGNAL_WAIT = 1, +} cudnnSignalMode_t; + +typedef enum { + CUDNN_GENSTATS_SUM_SQSUM = 0, +} cudnnGenStatsMode_t; + +typedef enum { + CUDNN_BN_FINALIZE_STATISTICS_TRAINING = 0, + CUDNN_BN_FINALIZE_STATISTICS_INFERENCE = 1, +} cudnnBnFinalizeStatsMode_t; + +typedef enum { + CUDNN_RNG_DISTRIBUTION_BERNOULLI, + CUDNN_RNG_DISTRIBUTION_UNIFORM, + CUDNN_RNG_DISTRIBUTION_NORMAL, +} cudnnRngDistribution_t; + +typedef enum { + CUDNN_ATTR_POINTWISE_MODE = 0, + CUDNN_ATTR_POINTWISE_MATH_PREC = 1, + CUDNN_ATTR_POINTWISE_NAN_PROPAGATION = 2, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP = 3, + CUDNN_ATTR_POINTWISE_RELU_UPPER_CLIP = 4, + CUDNN_ATTR_POINTWISE_RELU_LOWER_CLIP_SLOPE = 5, + CUDNN_ATTR_POINTWISE_ELU_ALPHA = 6, + CUDNN_ATTR_POINTWISE_SOFTPLUS_BETA = 7, + CUDNN_ATTR_POINTWISE_SWISH_BETA = 8, + CUDNN_ATTR_POINTWISE_AXIS = 9, + + CUDNN_ATTR_CONVOLUTION_COMP_TYPE = 100, + CUDNN_ATTR_CONVOLUTION_CONV_MODE = 101, + CUDNN_ATTR_CONVOLUTION_DILATIONS = 102, + CUDNN_ATTR_CONVOLUTION_FILTER_STRIDES = 103, + CUDNN_ATTR_CONVOLUTION_POST_PADDINGS = 104, + CUDNN_ATTR_CONVOLUTION_PRE_PADDINGS = 105, + CUDNN_ATTR_CONVOLUTION_SPATIAL_DIMS = 106, + + CUDNN_ATTR_ENGINEHEUR_MODE = 200, + CUDNN_ATTR_ENGINEHEUR_OPERATION_GRAPH = 201, + 
CUDNN_ATTR_ENGINEHEUR_RESULTS = 202, + + CUDNN_ATTR_ENGINECFG_ENGINE = 300, + CUDNN_ATTR_ENGINECFG_INTERMEDIATE_INFO = 301, + CUDNN_ATTR_ENGINECFG_KNOB_CHOICES = 302, + + CUDNN_ATTR_EXECUTION_PLAN_HANDLE = 400, + CUDNN_ATTR_EXECUTION_PLAN_ENGINE_CONFIG = 401, + CUDNN_ATTR_EXECUTION_PLAN_WORKSPACE_SIZE = 402, + CUDNN_ATTR_EXECUTION_PLAN_COMPUTED_INTERMEDIATE_UIDS = 403, + CUDNN_ATTR_EXECUTION_PLAN_RUN_ONLY_INTERMEDIATE_UIDS = 404, + CUDNN_ATTR_EXECUTION_PLAN_JSON_REPRESENTATION = 405, + + CUDNN_ATTR_INTERMEDIATE_INFO_UNIQUE_ID = 500, + CUDNN_ATTR_INTERMEDIATE_INFO_SIZE = 501, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_DATA_UIDS = 502, + CUDNN_ATTR_INTERMEDIATE_INFO_DEPENDENT_ATTRIBUTES = 503, + + CUDNN_ATTR_KNOB_CHOICE_KNOB_TYPE = 600, + CUDNN_ATTR_KNOB_CHOICE_KNOB_VALUE = 601, + + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_ALPHA = 700, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_BETA = 701, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_CONV_DESC = 702, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_W = 703, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_X = 704, + CUDNN_ATTR_OPERATION_CONVOLUTION_FORWARD_Y = 705, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_ALPHA = 706, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_BETA = 707, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_CONV_DESC = 708, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_W = 709, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DX = 710, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_DATA_DY = 711, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_ALPHA = 712, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_BETA = 713, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_CONV_DESC = 714, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DW = 715, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_X = 716, + CUDNN_ATTR_OPERATION_CONVOLUTION_BWD_FILTER_DY = 717, + + CUDNN_ATTR_OPERATION_POINTWISE_PW_DESCRIPTOR = 750, + CUDNN_ATTR_OPERATION_POINTWISE_XDESC = 751, + CUDNN_ATTR_OPERATION_POINTWISE_BDESC = 752, + CUDNN_ATTR_OPERATION_POINTWISE_YDESC = 753, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA1 = 754, + CUDNN_ATTR_OPERATION_POINTWISE_ALPHA2 = 755, + CUDNN_ATTR_OPERATION_POINTWISE_DXDESC = 756, + CUDNN_ATTR_OPERATION_POINTWISE_DYDESC = 757, + CUDNN_ATTR_OPERATION_POINTWISE_TDESC = 758, + + CUDNN_ATTR_OPERATION_GENSTATS_MODE = 770, + CUDNN_ATTR_OPERATION_GENSTATS_MATH_PREC = 771, + CUDNN_ATTR_OPERATION_GENSTATS_XDESC = 772, + CUDNN_ATTR_OPERATION_GENSTATS_SUMDESC = 773, + CUDNN_ATTR_OPERATION_GENSTATS_SQSUMDESC = 774, + + CUDNN_ATTR_OPERATION_BN_FINALIZE_STATS_MODE = 780, + CUDNN_ATTR_OPERATION_BN_FINALIZE_MATH_PREC = 781, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SUM_DESC = 782, + CUDNN_ATTR_OPERATION_BN_FINALIZE_Y_SQ_SUM_DESC = 783, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SCALE_DESC = 784, + CUDNN_ATTR_OPERATION_BN_FINALIZE_BIAS_DESC = 785, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_MEAN_DESC = 786, + CUDNN_ATTR_OPERATION_BN_FINALIZE_PREV_RUNNING_VAR_DESC = 787, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_MEAN_DESC = 788, + CUDNN_ATTR_OPERATION_BN_FINALIZE_UPDATED_RUNNING_VAR_DESC = 789, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_MEAN_DESC = 790, + CUDNN_ATTR_OPERATION_BN_FINALIZE_SAVED_INV_STD_DESC = 791, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_SCALE_DESC = 792, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EQ_BIAS_DESC = 793, + CUDNN_ATTR_OPERATION_BN_FINALIZE_ACCUM_COUNT_DESC = 794, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EPSILON_DESC = 795, + CUDNN_ATTR_OPERATION_BN_FINALIZE_EXP_AVERATE_FACTOR_DESC = 796, + + CUDNN_ATTR_OPERATIONGRAPH_HANDLE = 800, + CUDNN_ATTR_OPERATIONGRAPH_OPS = 801, + 
CUDNN_ATTR_OPERATIONGRAPH_ENGINE_GLOBAL_COUNT = 802, + + CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT = 900, + CUDNN_ATTR_TENSOR_DATA_TYPE = 901, + CUDNN_ATTR_TENSOR_DIMENSIONS = 902, + CUDNN_ATTR_TENSOR_STRIDES = 903, + CUDNN_ATTR_TENSOR_VECTOR_COUNT = 904, + CUDNN_ATTR_TENSOR_VECTORIZED_DIMENSION = 905, + CUDNN_ATTR_TENSOR_UNIQUE_ID = 906, + CUDNN_ATTR_TENSOR_IS_VIRTUAL = 907, + CUDNN_ATTR_TENSOR_IS_BY_VALUE = 908, + CUDNN_ATTR_TENSOR_REORDERING_MODE = 909, + CUDNN_ATTR_TENSOR_RAGGED_OFFSET_DESC = 913, + + CUDNN_ATTR_VARIANT_PACK_UNIQUE_IDS = 1000, + CUDNN_ATTR_VARIANT_PACK_DATA_POINTERS = 1001, + CUDNN_ATTR_VARIANT_PACK_INTERMEDIATES = 1002, + CUDNN_ATTR_VARIANT_PACK_WORKSPACE = 1003, + + CUDNN_ATTR_LAYOUT_INFO_TENSOR_UID = 1100, + CUDNN_ATTR_LAYOUT_INFO_TYPES = 1101, + + CUDNN_ATTR_KNOB_INFO_TYPE = 1200, + CUDNN_ATTR_KNOB_INFO_MAXIMUM_VALUE = 1201, + CUDNN_ATTR_KNOB_INFO_MINIMUM_VALUE = 1202, + CUDNN_ATTR_KNOB_INFO_STRIDE = 1203, + + CUDNN_ATTR_ENGINE_OPERATION_GRAPH = 1300, + CUDNN_ATTR_ENGINE_GLOBAL_INDEX = 1301, + CUDNN_ATTR_ENGINE_KNOB_INFO = 1302, + CUDNN_ATTR_ENGINE_NUMERICAL_NOTE = 1303, + CUDNN_ATTR_ENGINE_LAYOUT_INFO = 1304, + CUDNN_ATTR_ENGINE_BEHAVIOR_NOTE = 1305, + + CUDNN_ATTR_MATMUL_COMP_TYPE = 1500, + CUDNN_ATTR_MATMUL_PADDING_VALUE = 1503, + + CUDNN_ATTR_OPERATION_MATMUL_ADESC = 1520, + CUDNN_ATTR_OPERATION_MATMUL_BDESC = 1521, + CUDNN_ATTR_OPERATION_MATMUL_CDESC = 1522, + CUDNN_ATTR_OPERATION_MATMUL_DESC = 1523, + CUDNN_ATTR_OPERATION_MATMUL_IRREGULARLY_STRIDED_BATCH_COUNT = 1524, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_M_OVERRIDE_DESC = 1525, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_N_OVERRIDE_DESC = 1526, + CUDNN_ATTR_OPERATION_MATMUL_GEMM_K_OVERRIDE_DESC = 1527, + + CUDNN_ATTR_REDUCTION_OPERATOR = 1600, + CUDNN_ATTR_REDUCTION_COMP_TYPE = 1601, + + CUDNN_ATTR_OPERATION_REDUCTION_XDESC = 1610, + CUDNN_ATTR_OPERATION_REDUCTION_YDESC = 1611, + CUDNN_ATTR_OPERATION_REDUCTION_DESC = 1612, + + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MATH_PREC = 1620, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_MEAN_DESC = 1621, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_INVSTD_DESC = 1622, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_BN_SCALE_DESC = 1623, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_X_DESC = 1624, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DY_DESC = 1625, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_SCALE_DESC = 1626, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_DBN_BIAS_DESC = 1627, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_DY_SCALE_DESC = 1628, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_X_SCALE_DESC = 1629, + CUDNN_ATTR_OPERATION_BN_BWD_WEIGHTS_EQ_BIAS = 1630, + + CUDNN_ATTR_RESAMPLE_MODE = 1700, + CUDNN_ATTR_RESAMPLE_COMP_TYPE = 1701, + CUDNN_ATTR_RESAMPLE_SPATIAL_DIMS = 1702, + CUDNN_ATTR_RESAMPLE_POST_PADDINGS = 1703, + CUDNN_ATTR_RESAMPLE_PRE_PADDINGS = 1704, + CUDNN_ATTR_RESAMPLE_STRIDES = 1705, + CUDNN_ATTR_RESAMPLE_WINDOW_DIMS = 1706, + CUDNN_ATTR_RESAMPLE_NAN_PROPAGATION = 1707, + CUDNN_ATTR_RESAMPLE_PADDING_MODE = 1708, + + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_XDESC = 1710, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_YDESC = 1711, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_IDXDESC = 1712, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_ALPHA = 1713, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_BETA = 1714, + CUDNN_ATTR_OPERATION_RESAMPLE_FWD_DESC = 1716, + + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DXDESC = 1720, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DYDESC = 1721, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_IDXDESC = 1722, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_ALPHA = 1723, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_BETA = 1724, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_DESC = 1725, + 
CUDNN_ATTR_OPERATION_RESAMPLE_BWD_XDESC = 1726, + CUDNN_ATTR_OPERATION_RESAMPLE_BWD_YDESC = 1727, + + CUDNN_ATTR_OPERATION_CONCAT_AXIS = 1800, + CUDNN_ATTR_OPERATION_CONCAT_INPUT_DESCS = 1801, + CUDNN_ATTR_OPERATION_CONCAT_INPLACE_INDEX = 1802, + CUDNN_ATTR_OPERATION_CONCAT_OUTPUT_DESC = 1803, + + CUDNN_ATTR_OPERATION_SIGNAL_MODE = 1900, + CUDNN_ATTR_OPERATION_SIGNAL_FLAGDESC = 1901, + CUDNN_ATTR_OPERATION_SIGNAL_VALUE = 1902, + CUDNN_ATTR_OPERATION_SIGNAL_XDESC = 1903, + CUDNN_ATTR_OPERATION_SIGNAL_YDESC = 1904, + + CUDNN_ATTR_OPERATION_NORM_FWD_MODE = 2000, + CUDNN_ATTR_OPERATION_NORM_FWD_PHASE = 2001, + CUDNN_ATTR_OPERATION_NORM_FWD_XDESC = 2002, + CUDNN_ATTR_OPERATION_NORM_FWD_MEAN_DESC = 2003, + CUDNN_ATTR_OPERATION_NORM_FWD_INV_VARIANCE_DESC = 2004, + CUDNN_ATTR_OPERATION_NORM_FWD_SCALE_DESC = 2005, + CUDNN_ATTR_OPERATION_NORM_FWD_BIAS_DESC = 2006, + CUDNN_ATTR_OPERATION_NORM_FWD_EPSILON_DESC = 2007, + CUDNN_ATTR_OPERATION_NORM_FWD_EXP_AVG_FACTOR_DESC = 2008, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_MEAN_DESC = 2009, + CUDNN_ATTR_OPERATION_NORM_FWD_INPUT_RUNNING_VAR_DESC = 2010, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_MEAN_DESC = 2011, + CUDNN_ATTR_OPERATION_NORM_FWD_OUTPUT_RUNNING_VAR_DESC = 2012, + CUDNN_ATTR_OPERATION_NORM_FWD_YDESC = 2013, + CUDNN_ATTR_OPERATION_NORM_FWD_PEER_STAT_DESCS = 2014, + + CUDNN_ATTR_OPERATION_NORM_BWD_MODE = 2100, + CUDNN_ATTR_OPERATION_NORM_BWD_XDESC = 2101, + CUDNN_ATTR_OPERATION_NORM_BWD_MEAN_DESC = 2102, + CUDNN_ATTR_OPERATION_NORM_BWD_INV_VARIANCE_DESC = 2103, + CUDNN_ATTR_OPERATION_NORM_BWD_DYDESC = 2104, + CUDNN_ATTR_OPERATION_NORM_BWD_SCALE_DESC = 2105, + CUDNN_ATTR_OPERATION_NORM_BWD_EPSILON_DESC = 2106, + CUDNN_ATTR_OPERATION_NORM_BWD_DSCALE_DESC = 2107, + CUDNN_ATTR_OPERATION_NORM_BWD_DBIAS_DESC = 2108, + CUDNN_ATTR_OPERATION_NORM_BWD_DXDESC = 2109, + CUDNN_ATTR_OPERATION_NORM_BWD_PEER_STAT_DESCS = 2110, + + CUDNN_ATTR_OPERATION_RESHAPE_XDESC = 2200, + CUDNN_ATTR_OPERATION_RESHAPE_YDESC = 2201, + + CUDNN_ATTR_RNG_DISTRIBUTION = 2300, + CUDNN_ATTR_RNG_NORMAL_DIST_MEAN = 2301, + CUDNN_ATTR_RNG_NORMAL_DIST_STANDARD_DEVIATION = 2302, + CUDNN_ATTR_RNG_UNIFORM_DIST_MAXIMUM = 2303, + CUDNN_ATTR_RNG_UNIFORM_DIST_MINIMUM = 2304, + CUDNN_ATTR_RNG_BERNOULLI_DIST_PROBABILITY = 2305, + + CUDNN_ATTR_OPERATION_RNG_YDESC = 2310, + CUDNN_ATTR_OPERATION_RNG_SEED = 2311, + CUDNN_ATTR_OPERATION_RNG_DESC = 2312, + CUDNN_ATTR_OPERATION_RNG_OFFSET_DESC = 2313, + +} cudnnBackendAttributeName_t; + +typedef enum { + CUDNN_TYPE_HANDLE = 0, + CUDNN_TYPE_DATA_TYPE, + CUDNN_TYPE_BOOLEAN, + CUDNN_TYPE_INT64, + CUDNN_TYPE_FLOAT, + CUDNN_TYPE_DOUBLE, + CUDNN_TYPE_VOID_PTR, + CUDNN_TYPE_CONVOLUTION_MODE, + CUDNN_TYPE_HEUR_MODE, + CUDNN_TYPE_KNOB_TYPE, + CUDNN_TYPE_NAN_PROPOGATION, + CUDNN_TYPE_NUMERICAL_NOTE, + CUDNN_TYPE_LAYOUT_TYPE, + CUDNN_TYPE_ATTRIB_NAME, + CUDNN_TYPE_POINTWISE_MODE, + CUDNN_TYPE_BACKEND_DESCRIPTOR, + CUDNN_TYPE_GENSTATS_MODE, + CUDNN_TYPE_BN_FINALIZE_STATS_MODE, + CUDNN_TYPE_REDUCTION_OPERATOR_TYPE, + CUDNN_TYPE_BEHAVIOR_NOTE, + CUDNN_TYPE_TENSOR_REORDERING_MODE, + CUDNN_TYPE_RESAMPLE_MODE, + CUDNN_TYPE_PADDING_MODE, + CUDNN_TYPE_INT32, + CUDNN_TYPE_CHAR, + CUDNN_TYPE_SIGNAL_MODE, + CUDNN_TYPE_FRACTION, + CUDNN_TYPE_NORM_MODE, + CUDNN_TYPE_NORM_FWD_PHASE, + CUDNN_TYPE_RNG_DISTRIBUTION +} cudnnBackendAttributeType_t; + +typedef enum { + CUDNN_BACKEND_POINTWISE_DESCRIPTOR = 0, + CUDNN_BACKEND_CONVOLUTION_DESCRIPTOR, + CUDNN_BACKEND_ENGINE_DESCRIPTOR, + CUDNN_BACKEND_ENGINECFG_DESCRIPTOR, + CUDNN_BACKEND_ENGINEHEUR_DESCRIPTOR, + 
CUDNN_BACKEND_EXECUTION_PLAN_DESCRIPTOR, + CUDNN_BACKEND_INTERMEDIATE_INFO_DESCRIPTOR, + CUDNN_BACKEND_KNOB_CHOICE_DESCRIPTOR, + CUDNN_BACKEND_KNOB_INFO_DESCRIPTOR, + CUDNN_BACKEND_LAYOUT_INFO_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_DATA_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_POINTWISE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_GEN_STATS_DESCRIPTOR, + CUDNN_BACKEND_OPERATIONGRAPH_DESCRIPTOR, + CUDNN_BACKEND_VARIANT_PACK_DESCRIPTOR, + CUDNN_BACKEND_TENSOR_DESCRIPTOR, + CUDNN_BACKEND_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_MATMUL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_FINALIZE_STATISTICS_DESCRIPTOR, + CUDNN_BACKEND_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_REDUCTION_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_BN_BWD_WEIGHTS_DESCRIPTOR, + CUDNN_BACKEND_RESAMPLE_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_FWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESAMPLE_BWD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_CONCAT_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_SIGNAL_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_FORWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_NORM_BACKWARD_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RESHAPE_DESCRIPTOR, + CUDNN_BACKEND_RNG_DESCRIPTOR, + CUDNN_BACKEND_OPERATION_RNG_DESCRIPTOR +} cudnnBackendDescriptorType_t; + +typedef enum { + CUDNN_NUMERICAL_NOTE_TENSOR_CORE = 0, + CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS, + CUDNN_NUMERICAL_NOTE_REDUCED_PRECISION_REDUCTION, + CUDNN_NUMERICAL_NOTE_FFT, + CUDNN_NUMERICAL_NOTE_NONDETERMINISTIC, + CUDNN_NUMERICAL_NOTE_WINOGRAD, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_4x4, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_6x6, + CUDNN_NUMERICAL_NOTE_WINOGRAD_TILE_13x13, + CUDNN_NUMERICAL_NOTE_TYPE_COUNT, +} cudnnBackendNumericalNote_t; + +typedef enum { + CUDNN_BEHAVIOR_NOTE_RUNTIME_COMPILATION = 0, + CUDNN_BEHAVIOR_NOTE_REQUIRES_FILTER_INT8x32_REORDER = 1, + CUDNN_BEHAVIOR_NOTE_REQUIRES_BIAS_INT8x32_REORDER = 2, + CUDNN_BEHAVIOR_NOTE_TYPE_COUNT, +} cudnnBackendBehaviorNote_t; + +typedef enum { + CUDNN_KNOB_TYPE_SPLIT_K = 0, + CUDNN_KNOB_TYPE_SWIZZLE = 1, + CUDNN_KNOB_TYPE_TILE_SIZE = 2, + CUDNN_KNOB_TYPE_USE_TEX = 3, + CUDNN_KNOB_TYPE_EDGE = 4, + CUDNN_KNOB_TYPE_KBLOCK = 5, + CUDNN_KNOB_TYPE_LDGA = 6, + CUDNN_KNOB_TYPE_LDGB = 7, + CUDNN_KNOB_TYPE_CHUNK_K = 8, + CUDNN_KNOB_TYPE_SPLIT_H = 9, + CUDNN_KNOB_TYPE_WINO_TILE = 10, + CUDNN_KNOB_TYPE_MULTIPLY = 11, + CUDNN_KNOB_TYPE_SPLIT_K_BUF = 12, + CUDNN_KNOB_TYPE_TILEK = 13, + CUDNN_KNOB_TYPE_STAGES = 14, + CUDNN_KNOB_TYPE_REDUCTION_MODE = 15, + CUDNN_KNOB_TYPE_CTA_SPLIT_K_MODE = 16, + CUDNN_KNOB_TYPE_SPLIT_K_SLC = 17, + CUDNN_KNOB_TYPE_IDX_MODE = 18, + CUDNN_KNOB_TYPE_SLICED = 19, + CUDNN_KNOB_TYPE_SPLIT_RS = 20, + CUDNN_KNOB_TYPE_SINGLEBUFFER = 21, + CUDNN_KNOB_TYPE_LDGC = 22, + CUDNN_KNOB_TYPE_SPECFILT = 23, + CUDNN_KNOB_TYPE_KERNEL_CFG = 24, + CUDNN_KNOB_TYPE_WORKSPACE = 25, + CUDNN_KNOB_TYPE_TILE_CGA = 26, + CUDNN_KNOB_TYPE_TILE_CGA_M = 27, + CUDNN_KNOB_TYPE_TILE_CGA_N = 28, + CUDNN_KNOB_TYPE_BLOCK_SIZE = 29, + CUDNN_KNOB_TYPE_OCCUPANCY = 30, + CUDNN_KNOB_TYPE_ARRAY_SIZE_PER_THREAD = 31, + CUDNN_KNOB_TYPE_NUM_C_PER_BLOCK = 32, + CUDNN_KNOB_TYPE_COUNTS, +} cudnnBackendKnobType_t; + +typedef enum { + CUDNN_LAYOUT_TYPE_PREFERRED_NCHW = 0, + CUDNN_LAYOUT_TYPE_PREFERRED_NHWC = 1, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD4CK = 2, + CUDNN_LAYOUT_TYPE_PREFERRED_PAD8CK = 3, + CUDNN_LAYOUT_TYPE_COUNT = 4, +} cudnnBackendLayoutType_t; + +typedef enum { + CUDNN_HEUR_MODE_INSTANT = 0, + 
CUDNN_HEUR_MODE_B = 1, + CUDNN_HEUR_MODE_FALLBACK = 2, + CUDNN_HEUR_MODE_A = 3, + CUDNN_HEUR_MODES_COUNT = 4, +} cudnnBackendHeurMode_t; + +typedef enum { + CUDNN_TENSOR_REORDERING_NONE = 0, + CUDNN_TENSOR_REORDERING_INT8x32 = 1, + CUDNN_TENSOR_REORDERING_F16x16 = 2, +} cudnnBackendTensorReordering_t; + +typedef enum { + CUDNN_ZERO_PAD = 0, + CUDNN_NEG_INF_PAD = 1, + CUDNN_EDGE_VAL_PAD = 2, +} cudnnPaddingMode_t; + +typedef enum { + CUDNN_LAYER_NORM = 0, + CUDNN_INSTANCE_NORM = 1, + CUDNN_BATCH_NORM = 2, + CUDNN_GROUP_NORM = 3, +} cudnnBackendNormMode_t; + +typedef enum { + CUDNN_NORM_FWD_INFERENCE = 0, + CUDNN_NORM_FWD_TRAINING = 1, +} cudnnBackendNormFwdPhase_t; + +cudnnStatus_t CUDNNWINAPI +cudnnBackendCreateDescriptor(cudnnBackendDescriptorType_t descriptorType, cudnnBackendDescriptor_t *descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendDestroyDescriptor(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendInitialize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendFinalize(cudnnBackendDescriptor_t descriptor); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendSetAttribute(cudnnBackendDescriptor_t descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t elementCount, + const void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendGetAttribute(cudnnBackendDescriptor_t const descriptor, + cudnnBackendAttributeName_t attributeName, + cudnnBackendAttributeType_t attributeType, + int64_t requestedElementCount, + int64_t *elementCount, + void *arrayOfElements); + +cudnnStatus_t CUDNNWINAPI +cudnnBackendExecute(cudnnHandle_t handle, cudnnBackendDescriptor_t executionPlan, cudnnBackendDescriptor_t variantPack); + +#if defined(__cplusplus) +} +#endif + +#endif /* _CUDNN_BACKEND_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h new file mode 100644 index 0000000000000000000000000000000000000000..5e4c91c93bdc0b5e69d9d6326b4e7384e35a8ca6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer.h @@ -0,0 +1,571 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
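The backend ("graph") API declared in cudnn_backend_v8.h above is attribute-driven: every object is an opaque cudnnBackendDescriptor_t that is created, populated with typed attributes via cudnnBackendSetAttribute, and then finalized before it can be used. The sketch below is not part of the vendored header; it shows that flow for a tensor descriptor under the assumption that cudnn.h is on the include path, that CUDNN_DATA_FLOAT and CUDNN_STATUS_SUCCESS come from cudnn_ops_infer.h, and with purely illustrative shape, stride, UID and alignment values.

/* Minimal sketch: build one packed-NCHW backend tensor descriptor. */
#include <cudnn.h>
#include <stdint.h>

static cudnnStatus_t make_packed_nchw_tensor(cudnnBackendDescriptor_t *tensor, int64_t uid)
{
    int64_t dims[4]    = {1, 64, 56, 56};                /* N, C, H, W          */
    int64_t strides[4] = {64 * 56 * 56, 56 * 56, 56, 1}; /* fully packed layout */
    int64_t alignment  = 16;                             /* bytes               */
    cudnnDataType_t dtype = CUDNN_DATA_FLOAT;            /* from cudnn_ops_infer.h */

    cudnnStatus_t st = cudnnBackendCreateDescriptor(CUDNN_BACKEND_TENSOR_DESCRIPTOR, tensor);
    if (st != CUDNN_STATUS_SUCCESS)
        return st;

    /* Each attribute is typed; elementCount says how many values are passed. */
    cudnnBackendSetAttribute(*tensor, CUDNN_ATTR_TENSOR_DATA_TYPE, CUDNN_TYPE_DATA_TYPE, 1, &dtype);
    cudnnBackendSetAttribute(*tensor, CUDNN_ATTR_TENSOR_DIMENSIONS, CUDNN_TYPE_INT64, 4, dims);
    cudnnBackendSetAttribute(*tensor, CUDNN_ATTR_TENSOR_STRIDES, CUDNN_TYPE_INT64, 4, strides);
    cudnnBackendSetAttribute(*tensor, CUDNN_ATTR_TENSOR_UNIQUE_ID, CUDNN_TYPE_INT64, 1, &uid);
    cudnnBackendSetAttribute(*tensor, CUDNN_ATTR_TENSOR_BYTE_ALIGNMENT, CUDNN_TYPE_INT64, 1, &alignment);

    /* A descriptor becomes usable in an operation graph only after finalization. */
    return cudnnBackendFinalize(*tensor);
}

The same create / set-attribute / finalize pattern applies to operation, operation-graph, engine-config, execution-plan and variant-pack descriptors, after which cudnnBackendExecute runs the finalized plan against the variant pack.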
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions. + */ + +#if !defined(CUDNN_CNN_INFER_H_) +#define CUDNN_CNN_INFER_H_ + +#pragma once +#include <cuda_runtime.h> +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_INFER_MAJOR 8 +#define CUDNN_CNN_INFER_MINOR 9 +#define CUDNN_CNN_INFER_PATCH 2 + +#if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t; + +/* + * convolution mode + */ +typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t; + +/* + * CUDNN Reorder + */ +typedef enum { + CUDNN_DEFAULT_REORDER = 0, + CUDNN_NO_REORDER = 1, +} cudnnReorderType_t; + +typedef struct cudnnConvolutionFwdAlgoPerfStruct { + cudnnConvolutionFwdAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionFwdAlgoPerf_t; + +/* Create an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc); + +/* Destroy an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc, + int pad_h, /* zero-padding height */ + int pad_w, /* zero-padding width */ + int u, /* vertical filter stride */ + int v, /* horizontal filter stride */ + int dilation_h, /* filter dilation in the vertical dimension */ + int dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int *pad_h, /* zero-padding height */ + int *pad_w, /* zero-padding width */ + int *u, /* vertical filter stride */ + int *v, /* horizontal filter stride */ + int *dilation_h, /* filter dilation in the vertical dimension */ + int *dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc, + int arrayLength, /* nbDims-2 size */ + const int padA[], + const int filterStrideA[], + const int dilationA[], + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); /* convolution data type */ + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int arrayLengthRequested, + int *arrayLength, + int padA[], + int strideA[], + int dilationA[], + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); /* convolution data type */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int *n, + int *c, + int *h, 
+ int *w); + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int nbDims, + int tensorOuputDimA[]); + +/* helper function to provide the convolution forward algo that fit best the requirement */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnFilterDescriptor_t filterDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t destDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnIm2Col(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + void *colBuffer); + +cudnnStatus_t CUDNNWINAPI +cudnnReorderFilterAndBias(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + cudnnReorderType_t reorderType, + const void *filterData, + void *reorderedFilterData, + int reorderBias, + const void *biasData, + void *reorderedBiasData); + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + cudnnConvolutionFwdAlgo_t algo, + size_t *sizeInBytes); + +/* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform the forward pass for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionForward(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBiasActivationForward(cudnnHandle_t handle, + const void *alpha1, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + 
const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *alpha2, + const cudnnTensorDescriptor_t zDesc, + const void *z, + const cudnnTensorDescriptor_t biasDesc, + const void *bias, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* helper function to provide the convolution backward data algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdDataAlgoPerfStruct { + cudnnConvolutionBwdDataAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdDataAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + cudnnConvolutionBwdDataAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardData(cudnnHandle_t handle, + const void *alpha, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdDataAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Helper function to calculate folding descriptors for dgrad */ +cudnnStatus_t CUDNNWINAPI +cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const cudnnTensorFormat_t transformFormat, + cudnnFilterDescriptor_t foldedFilterDesc, + cudnnTensorDescriptor_t paddedDiffDesc, + cudnnConvolutionDescriptor_t foldedConvDesc, + 
cudnnTensorDescriptor_t foldedGradDesc, + cudnnTensorTransformDescriptor_t filterFoldTransDesc, + cudnnTensorTransformDescriptor_t diffPadTransDesc, + cudnnTensorTransformDescriptor_t gradFoldTransDesc, + cudnnTensorTransformDescriptor_t gradUnfoldTransDesc); + +/* cudnnFusedOps... */ +struct cudnnFusedOpsConstParamStruct; +typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t; + +struct cudnnFusedOpsVariantParamStruct; +typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t; + +struct cudnnFusedOpsPlanStruct; +typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t; + +typedef enum { + /* each op in [ ] can be disabled by passing NULL ptr */ + /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0, + /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1, + /* utility for BN training in BN-conv fusion */ + /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */ + /* optionally update running stats and generate saved stats */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2, + /* utility for BN inference in BN-conv fusion */ + /* computes the equivalent scale and bias from learned running stats and learned scale, bias */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3, + /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */ + CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4, + /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */ + CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5, + /* reserved for future use */ + CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6, +} cudnnFusedOps_t; + +typedef enum { + /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get XDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_XDESC = 0, + /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_XDATA_PLACEHOLDER = 1, + /* set/get BN_MODE: pass cudnnBatchNormMode_t* */ + CUDNN_PARAM_BN_MODE = 2, + /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3, + /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4, + /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5, + /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */ + /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */ + CUDNN_PARAM_ACTIVATION_DESC = 6, + /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */ + /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */ + CUDNN_PARAM_CONV_DESC = 7, + /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get WDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_WDESC = 8, + /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_WDATA_PLACEHOLDER = 9, + /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get DWDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_DWDESC = 10, + /* 
set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DWDATA_PLACEHOLDER = 11, + /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YDESC = 12, + /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YDATA_PLACEHOLDER = 13, + /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DYDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DYDESC = 14, + /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DYDATA_PLACEHOLDER = 15, + /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YSTATS_DESC = 16, + /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSUM_PLACEHOLDER = 17, + /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18, + /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19, + /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20, + /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21, + /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22, + /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23, + /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24, + /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25, + + /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ZDESC = 26, + /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ZDATA_PLACEHOLDER = 27, + /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28, + /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29, + /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30, + + /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31, + /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32, + + /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DXDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DXDESC = 33, + /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DXDATA_PLACEHOLDER = 34, + /* set 
DZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DZDESC = 35, + /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DZDATA_PLACEHOLDER = 36, + /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37, + /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38, +} cudnnFusedOpsConstParamLabel_t; + +typedef enum { + CUDNN_PTR_NULL = 0, + CUDNN_PTR_ELEM_ALIGNED = 1, + CUDNN_PTR_16B_ALIGNED = 2, +} cudnnFusedOpsPointerPlaceHolder_t; + +typedef enum { + /* set: pass void* pointing to dev memory */ + /* get: pass void** pointing to host memory */ + CUDNN_PTR_XDATA = 0, + CUDNN_PTR_BN_EQSCALE = 1, + CUDNN_PTR_BN_EQBIAS = 2, + CUDNN_PTR_WDATA = 3, + CUDNN_PTR_DWDATA = 4, + CUDNN_PTR_YDATA = 5, + CUDNN_PTR_DYDATA = 6, + CUDNN_PTR_YSUM = 7, + CUDNN_PTR_YSQSUM = 8, + CUDNN_PTR_WORKSPACE = 9, + CUDNN_PTR_BN_SCALE = 10, + CUDNN_PTR_BN_BIAS = 11, + CUDNN_PTR_BN_SAVED_MEAN = 12, + CUDNN_PTR_BN_SAVED_INVSTD = 13, + CUDNN_PTR_BN_RUNNING_MEAN = 14, + CUDNN_PTR_BN_RUNNING_VAR = 15, + CUDNN_PTR_ZDATA = 16, + CUDNN_PTR_BN_Z_EQSCALE = 17, + CUDNN_PTR_BN_Z_EQBIAS = 18, + CUDNN_PTR_ACTIVATION_BITMASK = 19, + CUDNN_PTR_DXDATA = 20, + CUDNN_PTR_DZDATA = 21, + CUDNN_PTR_BN_DSCALE = 22, + CUDNN_PTR_BN_DBIAS = 23, + + /* set/get: pass size_t* pointing to host memory */ + CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100, + /* set/get: pass int64_t* pointing to host memory */ + CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103, +} cudnnFusedOpsVariantParamLabel_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCnnInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_CNN_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..5e4c91c93bdc0b5e69d9d6326b4e7384e35a8ca6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_infer_v8.h @@ -0,0 +1,571 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
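For the legacy inference path declared in cudnn_cnn_infer.h above, a typical call sequence is: describe the input, filter and convolution, derive the output shape, query the workspace the chosen algorithm needs, then call cudnnConvolutionForward. The sketch below is illustrative only and not part of the vendored headers; it assumes the tensor/filter descriptor helpers from cudnn_ops_infer.h, a valid cudnnHandle_t with device pointers x, w, y, hard-codes CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM where real code might instead query cudnnGetConvolutionForwardAlgorithm_v7, and collapses error handling for brevity.

#include <cudnn.h>
#include <cuda_runtime.h>

/* Hypothetical helper (sketch, see note above): one 2-D convolution forward pass. */
static cudnnStatus_t conv2d_forward_once(cudnnHandle_t handle,
                                         const void *x, const void *w, void *y)
{
    cudnnTensorDescriptor_t xDesc, yDesc;
    cudnnFilterDescriptor_t wDesc;
    cudnnConvolutionDescriptor_t convDesc;

    cudnnCreateTensorDescriptor(&xDesc);
    cudnnCreateTensorDescriptor(&yDesc);
    cudnnCreateFilterDescriptor(&wDesc);
    cudnnCreateConvolutionDescriptor(&convDesc);

    /* 1x3x32x32 float input, 8 filters of 3x3x3, pad 1, stride 1, dilation 1. */
    cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 3, 32, 32);
    cudnnSetFilter4dDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 8, 3, 3, 3);
    cudnnSetConvolution2dDescriptor(convDesc, 1, 1, 1, 1, 1, 1,
                                    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);

    /* Let cuDNN derive the output shape from the descriptors set above. */
    int n, c, h, w_out;
    cudnnGetConvolution2dForwardOutputDim(convDesc, xDesc, wDesc, &n, &c, &h, &w_out);
    cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w_out);

    /* Workspace query for the chosen algorithm, then the convolution itself. */
    cudnnConvolutionFwdAlgo_t algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
    size_t wsBytes = 0;
    cudnnGetConvolutionForwardWorkspaceSize(handle, xDesc, wDesc, convDesc, yDesc, algo, &wsBytes);

    void *workspace = NULL;
    if (wsBytes > 0)
        cudaMalloc(&workspace, wsBytes);

    const float alpha = 1.0f, beta = 0.0f;
    cudnnStatus_t st = cudnnConvolutionForward(handle, &alpha, xDesc, x, wDesc, w, convDesc,
                                               algo, workspace, wsBytes, &beta, yDesc, y);

    if (workspace)
        cudaFree(workspace);
    cudnnDestroyConvolutionDescriptor(convDesc);
    cudnnDestroyFilterDescriptor(wDesc);
    cudnnDestroyTensorDescriptor(yDesc);
    cudnnDestroyTensorDescriptor(xDesc);
    return st;
}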
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn_infer : cuDNN's basic definitions and inference CNN functions. + */ + +#if !defined(CUDNN_CNN_INFER_H_) +#define CUDNN_CNN_INFER_H_ + +#pragma once +#include <cuda_runtime.h> +#include <stdint.h> + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_INFER_MAJOR 8 +#define CUDNN_CNN_INFER_MINOR 9 +#define CUDNN_CNN_INFER_PATCH 2 + +#if (CUDNN_CNN_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_CNN_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!!
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +typedef struct cudnnConvolutionStruct *cudnnConvolutionDescriptor_t; + +/* + * convolution mode + */ +typedef enum { CUDNN_CONVOLUTION = 0, CUDNN_CROSS_CORRELATION = 1 } cudnnConvolutionMode_t; + +/* + * CUDNN Reorder + */ +typedef enum { + CUDNN_DEFAULT_REORDER = 0, + CUDNN_NO_REORDER = 1, +} cudnnReorderType_t; + +typedef struct cudnnConvolutionFwdAlgoPerfStruct { + cudnnConvolutionFwdAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionFwdAlgoPerf_t; + +/* Create an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateConvolutionDescriptor(cudnnConvolutionDescriptor_t *convDesc); + +/* Destroy an instance of convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyConvolutionDescriptor(cudnnConvolutionDescriptor_t convDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionMathType(cudnnConvolutionDescriptor_t convDesc, cudnnMathType_t *mathType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionGroupCount(cudnnConvolutionDescriptor_t convDesc, int *groupCount); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionReorderType(cudnnConvolutionDescriptor_t convDesc, cudnnReorderType_t *reorderType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolution2dDescriptor(cudnnConvolutionDescriptor_t convDesc, + int pad_h, /* zero-padding height */ + int pad_w, /* zero-padding width */ + int u, /* vertical filter stride */ + int v, /* horizontal filter stride */ + int dilation_h, /* filter dilation in the vertical dimension */ + int dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int *pad_h, /* zero-padding height */ + int *pad_w, /* zero-padding width */ + int *u, /* vertical filter stride */ + int *v, /* horizontal filter stride */ + int *dilation_h, /* filter dilation in the vertical dimension */ + int *dilation_w, /* filter dilation in the horizontal dimension */ + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); + +cudnnStatus_t CUDNNWINAPI +cudnnSetConvolutionNdDescriptor(cudnnConvolutionDescriptor_t convDesc, + int arrayLength, /* nbDims-2 size */ + const int padA[], + const int filterStrideA[], + const int dilationA[], + cudnnConvolutionMode_t mode, + cudnnDataType_t computeType); /* convolution data type */ + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdDescriptor(const cudnnConvolutionDescriptor_t convDesc, + int arrayLengthRequested, + int *arrayLength, + int padA[], + int strideA[], + int dilationA[], + cudnnConvolutionMode_t *mode, + cudnnDataType_t *computeType); /* convolution data type */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolution2dForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int *n, + int *c, + int *h, 
+ int *w); + +/* Helper function to return the dimensions of the output tensor given a convolution descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + const cudnnFilterDescriptor_t filterDesc, + int nbDims, + int tensorOuputDimA[]); + +/* helper function to provide the convolution forward algo that fit best the requirement */ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnFilterDescriptor_t filterDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t destDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionForwardAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionFwdAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnIm2Col(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + void *colBuffer); + +cudnnStatus_t CUDNNWINAPI +cudnnReorderFilterAndBias(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + cudnnReorderType_t reorderType, + const void *filterData, + void *reorderedFilterData, + int reorderBias, + const void *biasData, + void *reorderedBiasData); + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnFilterDescriptor_t wDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t yDesc, + cudnnConvolutionFwdAlgo_t algo, + size_t *sizeInBytes); + +/* Convolution functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform the forward pass for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionForward(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* Fused conv/bias/activation operation : y = Act( alpha1 * conv(x) + alpha2 * z + bias ) */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBiasActivationForward(cudnnHandle_t handle, + const void *alpha1, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnFilterDescriptor_t wDesc, + 
const void *w, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionFwdAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *alpha2, + const cudnnTensorDescriptor_t zDesc, + const void *z, + const cudnnTensorDescriptor_t biasDesc, + const void *bias, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* helper function to provide the convolution backward data algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdDataAlgoPerfStruct { + cudnnConvolutionBwdDataAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdDataAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithm(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardDataAlgorithmEx(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdDataAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardDataWorkspaceSize(cudnnHandle_t handle, + const cudnnFilterDescriptor_t wDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t dxDesc, + cudnnConvolutionBwdDataAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardData(cudnnHandle_t handle, + const void *alpha, + const cudnnFilterDescriptor_t wDesc, + const void *w, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdDataAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Helper function to calculate folding descriptors for dgrad */ +cudnnStatus_t CUDNNWINAPI +cudnnGetFoldedConvBackwardDataDescriptors(const cudnnHandle_t handle, + const cudnnFilterDescriptor_t filterDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnTensorDescriptor_t gradDesc, + const cudnnTensorFormat_t transformFormat, + cudnnFilterDescriptor_t foldedFilterDesc, + cudnnTensorDescriptor_t paddedDiffDesc, + cudnnConvolutionDescriptor_t foldedConvDesc, + 
cudnnTensorDescriptor_t foldedGradDesc, + cudnnTensorTransformDescriptor_t filterFoldTransDesc, + cudnnTensorTransformDescriptor_t diffPadTransDesc, + cudnnTensorTransformDescriptor_t gradFoldTransDesc, + cudnnTensorTransformDescriptor_t gradUnfoldTransDesc); + +/* cudnnFusedOps... */ +struct cudnnFusedOpsConstParamStruct; +typedef struct cudnnFusedOpsConstParamStruct *cudnnFusedOpsConstParamPack_t; + +struct cudnnFusedOpsVariantParamStruct; +typedef struct cudnnFusedOpsVariantParamStruct *cudnnFusedOpsVariantParamPack_t; + +struct cudnnFusedOpsPlanStruct; +typedef struct cudnnFusedOpsPlanStruct *cudnnFusedOpsPlan_t; + +typedef enum { + /* each op in [ ] can be disabled by passing NULL ptr */ + /* [per channel scale], [per channel bias], [activation], convolution, [generate BN stats] */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_CONV_BNSTATS = 0, + /* [per channel scale], [per channel bias], [activation], convolutionBackwardWeights */ + CUDNN_FUSED_SCALE_BIAS_ACTIVATION_WGRAD = 1, + /* utility for BN training in BN-conv fusion */ + /* computes the equivalent scale and bias from ySum ySqSum and learned scale, bias */ + /* optionally update running stats and generate saved stats */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_TRAINING = 2, + /* utility for BN inference in BN-conv fusion */ + /* computes the equivalent scale and bias from learned running stats and learned scale, bias */ + CUDNN_FUSED_BN_FINALIZE_STATISTICS_INFERENCE = 3, + /* reserved for future use: convolution, [per channel scale], [per channel bias], [residual add], [activation] */ + CUDNN_FUSED_CONV_SCALE_BIAS_ADD_ACTIVATION = 4, + /* reserved for future use: [per channel scale], [per channel bias], [residual add], activation, bitmask */ + CUDNN_FUSED_SCALE_BIAS_ADD_ACTIVATION_GEN_BITMASK = 5, + /* reserved for future use */ + CUDNN_FUSED_DACTIVATION_FORK_DBATCHNORM = 6, +} cudnnFusedOps_t; + +typedef enum { + /* set XDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get XDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_XDESC = 0, + /* set/get XDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_XDATA_PLACEHOLDER = 1, + /* set/get BN_MODE: pass cudnnBatchNormMode_t* */ + CUDNN_PARAM_BN_MODE = 2, + /* set CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_EQSCALEBIAS_DESC = 3, + /* set/get BN_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQSCALE_PLACEHOLDER = 4, + /* set/get BN_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_EQBIAS_PLACEHOLDER = 5, + /* set ACTIVATION_DESC: pass previously initialized cudnnActivationDescriptor_t */ + /* get ACTIVATION_DESC: pass previously created cudnnActivationDescriptor_t */ + CUDNN_PARAM_ACTIVATION_DESC = 6, + /* set CONV_DESC: pass previously initialized cudnnConvolutionDescriptor_t */ + /* get CONV_DESC: pass previously created cudnnConvolutionDescriptor_t */ + CUDNN_PARAM_CONV_DESC = 7, + /* set WDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get WDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_WDESC = 8, + /* set/get WDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_WDATA_PLACEHOLDER = 9, + /* set DWDESC: pass previously initialized cudnnFilterDescriptor_t */ + /* get DWDESC: pass previously created cudnnFilterDescriptor_t */ + CUDNN_PARAM_DWDESC = 10, + /* 
set/get DWDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DWDATA_PLACEHOLDER = 11, + /* set YDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YDESC = 12, + /* set/get YDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YDATA_PLACEHOLDER = 13, + /* set DYDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DYDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DYDESC = 14, + /* set/get DYDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DYDATA_PLACEHOLDER = 15, + /* set YSTATS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get YSTATS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_YSTATS_DESC = 16, + /* set/get YSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSUM_PLACEHOLDER = 17, + /* set/get YSQSUM_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_YSQSUM_PLACEHOLDER = 18, + /* set CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_SCALEBIAS_MEANVAR_DESC = 19, + /* set/get CUDNN_PARAM_BN_SCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SCALE_PLACEHOLDER = 20, + /* set/get CUDNN_PARAM_BN_BIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_BIAS_PLACEHOLDER = 21, + /* set/get CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_MEAN_PLACEHOLDER = 22, + /* set/get CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_SAVED_INVSTD_PLACEHOLDER = 23, + /* set/get CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_MEAN_PLACEHOLDER = 24, + /* set/get CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_RUNNING_VAR_PLACEHOLDER = 25, + + /* set ZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ZDESC = 26, + /* set/get ZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ZDATA_PLACEHOLDER = 27, + /* set BN_Z_EQSCALEBIAS_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get BN_Z_EQSCALEBIAS_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_BN_Z_EQSCALEBIAS_DESC = 28, + /* set/get BN_Z_EQSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQSCALE_PLACEHOLDER = 29, + /* set/get BN_Z_EQBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_Z_EQBIAS_PLACEHOLDER = 30, + + /* set ACTIVATION_BITMASK_DESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get ACTIVATION_BITMASK_DESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_ACTIVATION_BITMASK_DESC = 31, + /* set/get ACTIVATION_BITMASK_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_ACTIVATION_BITMASK_PLACEHOLDER = 32, + + /* set DXDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DXDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DXDESC = 33, + /* set/get DXDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DXDATA_PLACEHOLDER = 34, + /* set 
DZDESC: pass previously initialized cudnnTensorDescriptor_t */ + /* get DZDESC: pass previously created cudnnTensorDescriptor_t */ + CUDNN_PARAM_DZDESC = 35, + /* set/get DZDATA_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_DZDATA_PLACEHOLDER = 36, + /* set/get CUDNN_PARAM_BN_DSCALE_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DSCALE_PLACEHOLDER = 37, + /* set/get CUDNN_PARAM_BN_DBIAS_PLACEHOLDER: pass cudnnFusedOpsPointerPlaceHolder_t* */ + CUDNN_PARAM_BN_DBIAS_PLACEHOLDER = 38, +} cudnnFusedOpsConstParamLabel_t; + +typedef enum { + CUDNN_PTR_NULL = 0, + CUDNN_PTR_ELEM_ALIGNED = 1, + CUDNN_PTR_16B_ALIGNED = 2, +} cudnnFusedOpsPointerPlaceHolder_t; + +typedef enum { + /* set: pass void* pointing to dev memory */ + /* get: pass void** pointing to host memory */ + CUDNN_PTR_XDATA = 0, + CUDNN_PTR_BN_EQSCALE = 1, + CUDNN_PTR_BN_EQBIAS = 2, + CUDNN_PTR_WDATA = 3, + CUDNN_PTR_DWDATA = 4, + CUDNN_PTR_YDATA = 5, + CUDNN_PTR_DYDATA = 6, + CUDNN_PTR_YSUM = 7, + CUDNN_PTR_YSQSUM = 8, + CUDNN_PTR_WORKSPACE = 9, + CUDNN_PTR_BN_SCALE = 10, + CUDNN_PTR_BN_BIAS = 11, + CUDNN_PTR_BN_SAVED_MEAN = 12, + CUDNN_PTR_BN_SAVED_INVSTD = 13, + CUDNN_PTR_BN_RUNNING_MEAN = 14, + CUDNN_PTR_BN_RUNNING_VAR = 15, + CUDNN_PTR_ZDATA = 16, + CUDNN_PTR_BN_Z_EQSCALE = 17, + CUDNN_PTR_BN_Z_EQBIAS = 18, + CUDNN_PTR_ACTIVATION_BITMASK = 19, + CUDNN_PTR_DXDATA = 20, + CUDNN_PTR_DZDATA = 21, + CUDNN_PTR_BN_DSCALE = 22, + CUDNN_PTR_BN_DBIAS = 23, + + /* set/get: pass size_t* pointing to host memory */ + CUDNN_SCALAR_SIZE_T_WORKSPACE_SIZE_IN_BYTES = 100, + /* set/get: pass int64_t* pointing to host memory */ + CUDNN_SCALAR_INT64_T_BN_ACCUMULATION_COUNT = 101, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EXP_AVG_FACTOR = 102, + /* set/get: pass double* pointing to host memory */ + CUDNN_SCALAR_DOUBLE_BN_EPSILON = 103, +} cudnnFusedOpsVariantParamLabel_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCnnInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_CNN_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h new file mode 100644 index 0000000000000000000000000000000000000000..ee0358b51d8b2c48880cf2f3cde7adf83c112336 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train.h @@ -0,0 +1,219 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions. + */ + +#pragma once +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_cnn_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_TRAIN_MAJOR 8 +#define CUDNN_CNN_TRAIN_MINOR 9 +#define CUDNN_CNN_TRAIN_PATCH 2 + +#if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!! 
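+/*
+ * A minimal runtime counterpart to the compile-time guard above, sketched under the
+ * assumption that CUDNN_VERSION carries the usual encoding from cudnn_version.h and that
+ * the application links against the libcudnn it was built for (error handling elided):
+ *
+ *   size_t loaded = cudnnGetVersion();                   // version of the loaded library
+ *   if (loaded != CUDNN_VERSION ||                       // header/library disagreement
+ *       cudnnCnnTrainVersionCheck() != CUDNN_STATUS_SUCCESS) {
+ *       // refuse to run: the CNN training sub-library does not match the core library
+ *   }
+ */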
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* helper function to provide the convolution backward filter algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct { + cudnnConvolutionBwdFilterAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdFilterAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *y, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardFilter(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnFilterDescriptor_t dwDesc, + void *dw); + +/* Function to compute the bias gradient for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardBias(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dbDesc, + void *db); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + const void *param); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t 
constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + void *param, + int *isNULL); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan); + +cudnnStatus_t CUDNNWINAPI +cudnnMakeFusedOpsPlan(cudnnHandle_t handle, + cudnnFusedOpsPlan_t plan, + const cudnnFusedOpsConstParamPack_t constPack, + size_t *workspaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnCnnTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..ee0358b51d8b2c48880cf2f3cde7adf83c112336 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_cnn_train_v8.h @@ -0,0 +1,219 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. 
These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_cnn_train : cuDNN's basic definitions and inference CNN functions. + */ + +#pragma once +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_cnn_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_CNN_TRAIN_MAJOR 8 +#define CUDNN_CNN_TRAIN_MINOR 9 +#define CUDNN_CNN_TRAIN_PATCH 2 + +#if (CUDNN_CNN_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_CNN_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_CNN_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN CNN INFER!!! +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* helper function to provide the convolution backward filter algo that fit best the requirement */ + +typedef struct cudnnConvolutionBwdFilterAlgoPerfStruct { + cudnnConvolutionBwdFilterAlgo_t algo; + cudnnStatus_t status; + float time; + size_t memory; + cudnnDeterminism_t determinism; + cudnnMathType_t mathType; + int reserved[3]; +} cudnnConvolutionBwdFilterAlgoPerf_t; + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithmMaxCount(cudnnHandle_t handle, int *count); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithm(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +cudnnStatus_t CUDNNWINAPI +cudnnFindConvolutionBackwardFilterAlgorithmEx(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *y, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t dwDesc, + void *dw, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults, + void *workSpace, + size_t workSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnnHandle_t handle, + const cudnnTensorDescriptor_t srcDesc, + const cudnnTensorDescriptor_t diffDesc, + const cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + const int requestedAlgoCount, + int *returnedAlgoCount, + cudnnConvolutionBwdFilterAlgoPerf_t *perfResults); + +/* + * convolution algorithm (which requires potentially some workspace) + */ + +/* Helper function to return the minimum size of the workspace to be passed to the convolution given an algo*/ +cudnnStatus_t CUDNNWINAPI +cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnnHandle_t handle, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t dyDesc, + const 
cudnnConvolutionDescriptor_t convDesc, + const cudnnFilterDescriptor_t gradDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardFilter(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnConvolutionDescriptor_t convDesc, + cudnnConvolutionBwdFilterAlgo_t algo, + void *workSpace, + size_t workSpaceSizeInBytes, + const void *beta, + const cudnnFilterDescriptor_t dwDesc, + void *dw); + +/* Function to compute the bias gradient for batch convolution */ +cudnnStatus_t CUDNNWINAPI +cudnnConvolutionBackwardBias(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dbDesc, + void *db); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t *constPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsConstParamPack(cudnnFusedOpsConstParamPack_t constPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsConstParamPackAttribute(cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + const void *param); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsConstParamPackAttribute(const cudnnFusedOpsConstParamPack_t constPack, + cudnnFusedOpsConstParamLabel_t paramLabel, + void *param, + int *isNULL); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t *varPack, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsVariantParamPack(cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFusedOpsVariantParamPackAttribute(cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFusedOpsVariantParamPackAttribute(const cudnnFusedOpsVariantParamPack_t varPack, + cudnnFusedOpsVariantParamLabel_t paramLabel, + void *ptr); + +cudnnStatus_t CUDNNWINAPI +cudnnCreateFusedOpsPlan(cudnnFusedOpsPlan_t *plan, cudnnFusedOps_t ops); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFusedOpsPlan(cudnnFusedOpsPlan_t plan); + +cudnnStatus_t CUDNNWINAPI +cudnnMakeFusedOpsPlan(cudnnHandle_t handle, + cudnnFusedOpsPlan_t plan, + const cudnnFusedOpsConstParamPack_t constPack, + size_t *workspaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnFusedOpsExecute(cudnnHandle_t handle, const cudnnFusedOpsPlan_t plan, cudnnFusedOpsVariantParamPack_t varPack); + +cudnnStatus_t CUDNNWINAPI +cudnnCnnTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h new file mode 100644 index 0000000000000000000000000000000000000000..79ba34cc1a1557462d49b63a9cb52d9bfe149693 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer.h @@ -0,0 +1,1183 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. 
+ * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_infer : cuDNN's basic definitions and inference operations. + */ + +#if !defined(CUDNN_OPS_INFER_H_) +#define CUDNN_OPS_INFER_H_ + +#include +#include + +#include "cudnn_version.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_INFER_MAJOR 8 +#define CUDNN_OPS_INFER_MINOR 9 +#define CUDNN_OPS_INFER_PATCH 2 + +#if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS INFER!!! 
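+/*
+ * The context handle, stream binding, and error-reporting entry points declared just
+ * below are typically exercised together. A short sketch, assuming a CUDA stream named
+ * stream created elsewhere and stdio available for reporting (cleanup paths elided):
+ *
+ *   cudnnHandle_t handle;
+ *   cudnnStatus_t st = cudnnCreate(&handle);             // allocates the cuDNN context
+ *   if (st != CUDNN_STATUS_SUCCESS) {
+ *       fprintf(stderr, "cudnnCreate: %s\n", cudnnGetErrorString(st));
+ *   }
+ *   cudnnSetStream(handle, stream);                      // later calls run on this stream
+ *   ...
+ *   cudnnDestroy(handle);                                // release the context when done
+ */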
+#endif + +#ifndef CUDNNWINAPI +#ifdef _WIN32 +#define CUDNNWINAPI __stdcall +#else +#define CUDNNWINAPI +#endif +#endif + +/* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */ +#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__)) +/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */ +#define CUDNN_DEPRECATED __attribute__((deprecated)) +#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER) +/* Microsoft Visual C++ */ +#define CUDNN_DEPRECATED __declspec(deprecated) +#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L) +/* C++14 compilers */ +#define CUDNN_DEPRECATED [[deprecated]] +#else +/* No support for the deprecated attribute */ +#define CUDNN_DEPRECATED +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudnnContext; +typedef struct cudnnContext *cudnnHandle_t; + +size_t CUDNNWINAPI +cudnnGetVersion(void); + +size_t CUDNNWINAPI +cudnnGetMaxDeviceVersion(void); + +/* Returns CUDA Runtime version statically linked against cudnn */ +size_t CUDNNWINAPI +cudnnGetCudartVersion(void); + +/* + * CUDNN return codes + */ +typedef enum { + CUDNN_STATUS_SUCCESS = 0, + CUDNN_STATUS_NOT_INITIALIZED = 1, + CUDNN_STATUS_ALLOC_FAILED = 2, + CUDNN_STATUS_BAD_PARAM = 3, + CUDNN_STATUS_INTERNAL_ERROR = 4, + CUDNN_STATUS_INVALID_VALUE = 5, + CUDNN_STATUS_ARCH_MISMATCH = 6, + CUDNN_STATUS_MAPPING_ERROR = 7, + CUDNN_STATUS_EXECUTION_FAILED = 8, + CUDNN_STATUS_NOT_SUPPORTED = 9, + CUDNN_STATUS_LICENSE_ERROR = 10, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11, + CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12, + CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13, + CUDNN_STATUS_VERSION_MISMATCH = 14, +} cudnnStatus_t; + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag); + +#ifndef __LIBRARY_TYPES_H__ + +typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType; + +#endif + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); + +/* Data structures to represent Image/Filter and the Neural Network Layer */ +typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t; +typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t; +typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t; +typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t; +typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t; +typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t; +typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t; +typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t; +typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t; +typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t; +/* + * CUDNN data type + */ +typedef enum { + 
CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 = 7, + CUDNN_DATA_INT8x32 = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; + +/* + * CUDNN math type + */ +typedef enum { + CUDNN_DEFAULT_MATH = 0, + CUDNN_TENSOR_OP_MATH = 1, + CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2, + CUDNN_FMA_MATH = 3, +} cudnnMathType_t; + +/* + * CUDNN propagate Nan + */ +typedef enum { + CUDNN_NOT_PROPAGATE_NAN = 0, + CUDNN_PROPAGATE_NAN = 1, +} cudnnNanPropagation_t; + +/* + * CUDNN Determinism + */ +typedef enum { + CUDNN_NON_DETERMINISTIC = 0, + CUDNN_DETERMINISTIC = 1, +} cudnnDeterminism_t; + +/* Maximum supported number of tensor dimensions */ +#define CUDNN_DIM_MAX 8 + +/* Create an instance of a generic Tensor descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc); + +typedef enum { + CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */ + CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/ + CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */ +} cudnnTensorFormat_t; + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w); /* width of input section */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w, /* width of input section */ + int nStride, + int cStride, + int hStride, + int wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t *dataType, /* image data type */ + int *n, /* number of inputs (batch size) */ + int *c, /* number of input feature maps */ + int *h, /* height of input section */ + int *w, /* width of input section */ + int *nStride, + int *cStride, + int *hStride, + int *wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, + int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, + int *nbDims, + int dimA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size); + +/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride + + 1)Example of all images in row major order one batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 1 + + + 2)Example of all images in row major 
with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
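+ *
+ * A typical create/set/init/destroy sequence ending with this call, sketched with
+ * illustrative 4-d padding and fold values; srcDesc and destDesc are assumed to be
+ * tensor descriptors created elsewhere, and status checks are elided:
+ *
+ *   cudnnTensorTransformDescriptor_t trans;
+ *   cudnnCreateTensorTransformDescriptor(&trans);
+ *   int32_t  padBefore[4] = {0, 0, 1, 1}, padAfter[4] = {0, 0, 1, 1};
+ *   uint32_t fold[4]      = {1, 1, 1, 1};
+ *   cudnnSetTensorTransformDescriptor(trans, 4, CUDNN_TENSOR_NCHW,
+ *                                     padBefore, padAfter, fold, CUDNN_TRANSFORM_FOLD);
+ *   size_t destBytes = 0;
+ *   cudnnInitTransformDest(trans, srcDesc, destDesc, &destBytes);  // sizes the destination
+ *   cudnnDestroyTensorTransformDescriptor(trans);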
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor op type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_ADD = 0, + CUDNN_REDUCE_TENSOR_MUL = 1, + CUDNN_REDUCE_TENSOR_MIN = 2, + CUDNN_REDUCE_TENSOR_MAX = 3, + CUDNN_REDUCE_TENSOR_AMAX = 4, + CUDNN_REDUCE_TENSOR_AVG = 5, + CUDNN_REDUCE_TENSOR_NORM1 = 6, + CUDNN_REDUCE_TENSOR_NORM2 = 7, + CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8, +} cudnnReduceTensorOp_t; + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
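+ *
+ * With that default in mind, a reduction descriptor is configured through the create/set
+ * helpers declared a few lines below. A sketch for an averaging reduction with float
+ * compute type and no indices requested (values are illustrative; status checks elided):
+ *
+ *   cudnnReduceTensorDescriptor_t red;
+ *   cudnnCreateReduceTensorDescriptor(&red);
+ *   cudnnSetReduceTensorDescriptor(red,
+ *                                  CUDNN_REDUCE_TENSOR_AVG,          // reduction op
+ *                                  CUDNN_DATA_FLOAT,                 // compute type
+ *                                  CUDNN_NOT_PROPAGATE_NAN,
+ *                                  CUDNN_REDUCE_TENSOR_NO_INDICES,   // indices unused for AVG
+ *                                  CUDNN_32BIT_INDICES);             // current default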
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
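+   A full reduction combines the two size queries above with the call declared just below.
+   A sketch, assuming a configured reduce descriptor red, float alpha/beta, and device
+   buffers A and C described by aDesc and cDesc (allocation and status checks elided):
+
+     size_t idxBytes = 0, wsBytes = 0;
+     cudnnGetReductionIndicesSize(handle, red, aDesc, cDesc, &idxBytes);
+     cudnnGetReductionWorkspaceSize(handle, red, aDesc, cDesc, &wsBytes);
+     void *indices = NULL, *workspace = NULL;             // cudaMalloc when sizes are non-zero
+     float alpha = 1.0f, beta = 0.0f;                      // C = avg(A), previous C discarded
+     cudnnReduceTensor(handle, red, indices, idxBytes, workspace, wsBytes,
+                       &alpha, aDesc, A, &beta, cDesc, C);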
*/ +cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + 
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t; + +/* Create an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * activation mode + */ +typedef enum { + CUDNN_ACTIVATION_SIGMOID = 0, + CUDNN_ACTIVATION_RELU = 1, + CUDNN_ACTIVATION_TANH = 2, + CUDNN_ACTIVATION_CLIPPED_RELU = 3, + CUDNN_ACTIVATION_ELU = 4, + CUDNN_ACTIVATION_IDENTITY = 5, + CUDNN_ACTIVATION_SWISH = 6 +} cudnnActivationMode_t; + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t 
activationDesc, double swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). 
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. 
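+ *
+ * A sketch of a plain per-channel inference call (no residual add, no activation), under
+ * the assumption that the descriptors and device pointers were prepared as for batch
+ * normalization and that an epsilon of 1e-5 is acceptable for the model:
+ *
+ *   float alpha = 1.0f, beta = 0.0f;
+ *   cudnnNormalizationForwardInference(handle,
+ *                                      CUDNN_NORM_PER_CHANNEL, CUDNN_NORM_OPS_NORM,
+ *                                      CUDNN_NORM_ALGO_STANDARD,
+ *                                      &alpha, &beta,
+ *                                      xDesc, x,
+ *                                      scaleBiasDesc, scale, bias,
+ *                                      meanVarDesc, estMean, estVar,
+ *                                      NULL, NULL,        // zDesc, z: unused for OPS_NORM
+ *                                      NULL,              // activationDesc: unused here
+ *                                      yDesc, y,
+ *                                      1e-5,              // epsilon, >= CUDNN_BN_MIN_EPSILON
+ *                                      1);                // groupCnt, must currently be 1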
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: remove */ + +typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t; +typedef 
struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t; + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* TODO: remove */ +typedef struct cudnnAlgorithmUnionStruct { + union Algorithm { + cudnnConvolutionFwdAlgo_t convFwdAlgo; + cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo; + cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo; + cudnnRNNAlgo_t RNNAlgo; + cudnnCTCLossAlgo_t CTCLossAlgo; + } algo; +} cudnnAlgorithm_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t algoDesc, + cudnnStatus_t status, + float time, + size_t memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t *algoDesc, + cudnnStatus_t *status, + float *time, + size_t *memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy); + +CUDNN_DEPRECATED 
cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSaveAlgorithm(cudnnHandle_t handle, + cudnnAlgorithmDescriptor_t algoDesc, + void *algoSpace, + size_t algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRestoreAlgorithm(cudnnHandle_t handle, + void *algoSpace, + size_t algoSpaceSizeInBytes, + cudnnAlgorithmDescriptor_t algoDesc); + +typedef enum { + CUDNN_SEV_FATAL = 0, + CUDNN_SEV_ERROR = 1, + CUDNN_SEV_WARNING = 2, + CUDNN_SEV_INFO = 3, +} cudnnSeverity_t; + +/* Message masks to be used with cudnnSetCallback() */ +#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR) +#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING) +#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO) + +/* struct containing useful informaiton for each API call */ +typedef struct cudnnDebugStruct { + unsigned cudnn_version; + cudnnStatus_t cudnnStatus; + unsigned time_sec; /* epoch time in seconds */ + unsigned time_usec; /* microseconds part of epoch time */ + unsigned time_delta; /* time since start in seconds */ + cudnnHandle_t handle; /* cudnn handle */ + cudaStream_t stream; /* cuda stream ID */ + unsigned long long pid; /* process ID */ + unsigned long long tid; /* thread ID */ + int cudaDeviceId; /* CUDA device ID */ + int reserved[15]; /* reserved for future use */ +} cudnnDebug_t; + +typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..79ba34cc1a1557462d49b63a9cb52d9bfe149693 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_infer_v8.h @@ -0,0 +1,1183 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_infer : cuDNN's basic definitions and inference operations. + */ + +#if !defined(CUDNN_OPS_INFER_H_) +#define CUDNN_OPS_INFER_H_ + +#include +#include + +#include "cudnn_version.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_INFER_MAJOR 8 +#define CUDNN_OPS_INFER_MINOR 9 +#define CUDNN_OPS_INFER_PATCH 2 + +#if (CUDNN_OPS_INFER_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_INFER_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_INFER_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS INFER!!! 
+#endif + +#ifndef CUDNNWINAPI +#ifdef _WIN32 +#define CUDNNWINAPI __stdcall +#else +#define CUDNNWINAPI +#endif +#endif + +/* Warnings for deprecated API-s are enabled using the CUDNN_WARN_DEPRECATED macro */ +#if defined(CUDNN_WARN_DEPRECATED) && (defined(__GNUC__) || defined(__clang__)) +/* GCC, Intel C/C++, Cray C/C++, CLANG, IBM XL C/C++ little endian */ +#define CUDNN_DEPRECATED __attribute__((deprecated)) +#elif defined(CUDNN_WARN_DEPRECATED) && defined(_MSC_VER) +/* Microsoft Visual C++ */ +#define CUDNN_DEPRECATED __declspec(deprecated) +#elif defined(CUDNN_WARN_DEPRECATED) && (__cplusplus >= 201402L) +/* C++14 compilers */ +#define CUDNN_DEPRECATED [[deprecated]] +#else +/* No support for the deprecated attribute */ +#define CUDNN_DEPRECATED +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +struct cudnnContext; +typedef struct cudnnContext *cudnnHandle_t; + +size_t CUDNNWINAPI +cudnnGetVersion(void); + +size_t CUDNNWINAPI +cudnnGetMaxDeviceVersion(void); + +/* Returns CUDA Runtime version statically linked against cudnn */ +size_t CUDNNWINAPI +cudnnGetCudartVersion(void); + +/* + * CUDNN return codes + */ +typedef enum { + CUDNN_STATUS_SUCCESS = 0, + CUDNN_STATUS_NOT_INITIALIZED = 1, + CUDNN_STATUS_ALLOC_FAILED = 2, + CUDNN_STATUS_BAD_PARAM = 3, + CUDNN_STATUS_INTERNAL_ERROR = 4, + CUDNN_STATUS_INVALID_VALUE = 5, + CUDNN_STATUS_ARCH_MISMATCH = 6, + CUDNN_STATUS_MAPPING_ERROR = 7, + CUDNN_STATUS_EXECUTION_FAILED = 8, + CUDNN_STATUS_NOT_SUPPORTED = 9, + CUDNN_STATUS_LICENSE_ERROR = 10, + CUDNN_STATUS_RUNTIME_PREREQUISITE_MISSING = 11, + CUDNN_STATUS_RUNTIME_IN_PROGRESS = 12, + CUDNN_STATUS_RUNTIME_FP_OVERFLOW = 13, + CUDNN_STATUS_VERSION_MISMATCH = 14, +} cudnnStatus_t; + +/* human-readable error messages */ +const char *CUDNNWINAPI +cudnnGetErrorString(cudnnStatus_t status); + +/* Forward definition in this version only */ +typedef struct cudnnRuntimeTag_t cudnnRuntimeTag_t; + +typedef enum { + CUDNN_ERRQUERY_RAWCODE = 0, + CUDNN_ERRQUERY_NONBLOCKING = 1, + CUDNN_ERRQUERY_BLOCKING = 2, +} cudnnErrQueryMode_t; + +cudnnStatus_t CUDNNWINAPI +cudnnQueryRuntimeError(cudnnHandle_t handle, cudnnStatus_t *rstatus, cudnnErrQueryMode_t mode, cudnnRuntimeTag_t *tag); + +#ifndef __LIBRARY_TYPES_H__ + +typedef enum libraryPropertyType_t { MAJOR_VERSION, MINOR_VERSION, PATCH_LEVEL } libraryPropertyType; + +#endif + +cudnnStatus_t CUDNNWINAPI +cudnnGetProperty(libraryPropertyType type, int *value); + +cudnnStatus_t CUDNNWINAPI +cudnnCreate(cudnnHandle_t *handle); +cudnnStatus_t CUDNNWINAPI +cudnnDestroy(cudnnHandle_t handle); +cudnnStatus_t CUDNNWINAPI +cudnnSetStream(cudnnHandle_t handle, cudaStream_t streamId); +cudnnStatus_t CUDNNWINAPI +cudnnGetStream(cudnnHandle_t handle, cudaStream_t *streamId); + +/* Data structures to represent Image/Filter and the Neural Network Layer */ +typedef struct cudnnTensorStruct *cudnnTensorDescriptor_t; +typedef struct cudnnPoolingStruct *cudnnPoolingDescriptor_t; +typedef struct cudnnFilterStruct *cudnnFilterDescriptor_t; +typedef struct cudnnLRNStruct *cudnnLRNDescriptor_t; +typedef struct cudnnActivationStruct *cudnnActivationDescriptor_t; +typedef struct cudnnSpatialTransformerStruct *cudnnSpatialTransformerDescriptor_t; +typedef struct cudnnOpTensorStruct *cudnnOpTensorDescriptor_t; +typedef struct cudnnReduceTensorStruct *cudnnReduceTensorDescriptor_t; +typedef struct cudnnCTCLossStruct *cudnnCTCLossDescriptor_t; +typedef struct cudnnTensorTransformStruct *cudnnTensorTransformDescriptor_t; +/* + * CUDNN data type + */ +typedef enum { + 
CUDNN_DATA_FLOAT = 0, + CUDNN_DATA_DOUBLE = 1, + CUDNN_DATA_HALF = 2, + CUDNN_DATA_INT8 = 3, + CUDNN_DATA_INT32 = 4, + CUDNN_DATA_INT8x4 = 5, + CUDNN_DATA_UINT8 = 6, + CUDNN_DATA_UINT8x4 = 7, + CUDNN_DATA_INT8x32 = 8, + CUDNN_DATA_BFLOAT16 = 9, + CUDNN_DATA_INT64 = 10, + CUDNN_DATA_BOOLEAN = 11, + CUDNN_DATA_FP8_E4M3 = 12, + CUDNN_DATA_FP8_E5M2 = 13, + CUDNN_DATA_FAST_FLOAT_FOR_FP8 = 14, +} cudnnDataType_t; + +/* + * CUDNN math type + */ +typedef enum { + CUDNN_DEFAULT_MATH = 0, + CUDNN_TENSOR_OP_MATH = 1, + CUDNN_TENSOR_OP_MATH_ALLOW_CONVERSION = 2, + CUDNN_FMA_MATH = 3, +} cudnnMathType_t; + +/* + * CUDNN propagate Nan + */ +typedef enum { + CUDNN_NOT_PROPAGATE_NAN = 0, + CUDNN_PROPAGATE_NAN = 1, +} cudnnNanPropagation_t; + +/* + * CUDNN Determinism + */ +typedef enum { + CUDNN_NON_DETERMINISTIC = 0, + CUDNN_DETERMINISTIC = 1, +} cudnnDeterminism_t; + +/* Maximum supported number of tensor dimensions */ +#define CUDNN_DIM_MAX 8 + +/* Create an instance of a generic Tensor descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorDescriptor(cudnnTensorDescriptor_t *tensorDesc); + +typedef enum { + CUDNN_TENSOR_NCHW = 0, /* row major (wStride = 1, hStride = w) */ + CUDNN_TENSOR_NHWC = 1, /* feature maps interleaved ( cStride = 1 )*/ + CUDNN_TENSOR_NCHW_VECT_C = 2, /* each image point is vector of element of C, vector length in data type */ +} cudnnTensorFormat_t; + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w); /* width of input section */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor4dDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, /* image data type */ + int n, /* number of inputs (batch size) */ + int c, /* number of input feature maps */ + int h, /* height of input section */ + int w, /* width of input section */ + int nStride, + int cStride, + int hStride, + int wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensor4dDescriptor(const cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t *dataType, /* image data type */ + int *n, /* number of inputs (batch size) */ + int *c, /* number of input feature maps */ + int *h, /* height of input section */ + int *w, /* width of input section */ + int *nStride, + int *cStride, + int *hStride, + int *wStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptor(cudnnTensorDescriptor_t tensorDesc, + cudnnDataType_t dataType, + int nbDims, + const int dimA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorNdDescriptorEx(cudnnTensorDescriptor_t tensorDesc, + cudnnTensorFormat_t format, + cudnnDataType_t dataType, + int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorNdDescriptor(const cudnnTensorDescriptor_t tensorDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, + int *nbDims, + int dimA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorSizeInBytes(const cudnnTensorDescriptor_t tensorDesc, size_t *size); + +/* PixelOffset( n, c, h, w ) = n *input_stride + c * feature_stride + h * h_stride + w * w_stride + + 1)Example of all images in row major order one batch of features after the other (with an optional padding on row) + input_stride : c x h x h_stride + feature_stride : h x h_stride + h_stride : >= w ( h_stride = w if no padding) + w_stride : 1 + + + 2)Example of all images in row major 
with features maps interleaved + input_stride : c x h x h_stride + feature_stride : 1 + h_stride : w x c + w_stride : c + + 3)Example of all images in column major order one batch of features after the other (with optional padding on column) + input_stride : c x w x w_stride + feature_stride : w x w_stride + h_stride : 1 + w_stride : >= h + +*/ + +/* Destroy an instance of Tensor4d descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorDescriptor(cudnnTensorDescriptor_t tensorDesc); + +/* Fold/unfold transforms */ +typedef enum { + CUDNN_TRANSFORM_FOLD = 0U, + CUDNN_TRANSFORM_UNFOLD = 1U, +} cudnnFoldingDirection_t; + +/** Create a destination descriptor for cudnnTransformTensor */ +cudnnStatus_t CUDNNWINAPI +cudnnInitTransformDest(const cudnnTensorTransformDescriptor_t transformDesc, + const cudnnTensorDescriptor_t srcDesc, + cudnnTensorDescriptor_t destDesc, + size_t *destSizeInBytes); + +/** Create an empty tensor transform descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateTensorTransformDescriptor(cudnnTensorTransformDescriptor_t *transformDesc); + +/** Initialize a previously created tensor transform descriptor. */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + const uint32_t nbDims, + const cudnnTensorFormat_t destFormat, + const int32_t padBeforeA[], + const int32_t padAfterA[], + const uint32_t foldA[], + const cudnnFoldingDirection_t direction); + +/** + * Retrieves the values stored in a previously initialized tensor transform + * descriptor. + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc, + uint32_t nbDimsRequested, + cudnnTensorFormat_t *destFormat, + int32_t padBeforeA[], + int32_t padAfterA[], + uint32_t foldA[], + cudnnFoldingDirection_t *direction); + +/** + * Destroys a previously created tensor transform descriptor. 
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyTensorTransformDescriptor(cudnnTensorTransformDescriptor_t transformDesc); + +/* Tensor layout conversion helper (y = alpha * x + beta * y) */ +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformTensorEx(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnTensorDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnTensorDescriptor_t destDesc, + void *destData); + +/* Tensor Bias addition : C = alpha * A + beta * C */ +cudnnStatus_t CUDNNWINAPI +cudnnAddTensor(cudnnHandle_t handle, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN OpTensor op type + */ +typedef enum { + CUDNN_OP_TENSOR_ADD = 0, + CUDNN_OP_TENSOR_MUL = 1, + CUDNN_OP_TENSOR_MIN = 2, + CUDNN_OP_TENSOR_MAX = 3, + CUDNN_OP_TENSOR_SQRT = 4, + CUDNN_OP_TENSOR_NOT = 5, +} cudnnOpTensorOp_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateOpTensorDescriptor(cudnnOpTensorDescriptor_t *opTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t opTensorOp, + cudnnDataType_t opTensorCompType, + cudnnNanPropagation_t opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnGetOpTensorDescriptor(const cudnnOpTensorDescriptor_t opTensorDesc, + cudnnOpTensorOp_t *opTensorOp, + cudnnDataType_t *opTensorCompType, + cudnnNanPropagation_t *opTensorNanOpt); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyOpTensorDescriptor(cudnnOpTensorDescriptor_t opTensorDesc); + +/* Tensor operation : C = op( alpha1 * A, alpha2 * B ) + beta * C */ +/* B tensor is ignored for CUDNN_OP_TENSOR_SQRT, CUDNN_OP_TENSOR_NOT. */ +cudnnStatus_t CUDNNWINAPI +cudnnOpTensor(cudnnHandle_t handle, + const cudnnOpTensorDescriptor_t opTensorDesc, + const void *alpha1, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *alpha2, + const cudnnTensorDescriptor_t bDesc, + const void *B, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* + * CUDNN ReduceTensor op type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_ADD = 0, + CUDNN_REDUCE_TENSOR_MUL = 1, + CUDNN_REDUCE_TENSOR_MIN = 2, + CUDNN_REDUCE_TENSOR_MAX = 3, + CUDNN_REDUCE_TENSOR_AMAX = 4, + CUDNN_REDUCE_TENSOR_AVG = 5, + CUDNN_REDUCE_TENSOR_NORM1 = 6, + CUDNN_REDUCE_TENSOR_NORM2 = 7, + CUDNN_REDUCE_TENSOR_MUL_NO_ZEROS = 8, +} cudnnReduceTensorOp_t; + +/* + * CUDNN ReduceTensor indices type + */ +typedef enum { + CUDNN_REDUCE_TENSOR_NO_INDICES = 0, + CUDNN_REDUCE_TENSOR_FLATTENED_INDICES = 1, +} cudnnReduceTensorIndices_t; + +/* + * CUDNN tensor indices type size (all unsigned) + * Currently not supported, default is 32 bit unsigned. 
+ */ +typedef enum { + CUDNN_32BIT_INDICES = 0, + CUDNN_64BIT_INDICES = 1, + CUDNN_16BIT_INDICES = 2, + CUDNN_8BIT_INDICES = 3, +} cudnnIndicesType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateReduceTensorDescriptor(cudnnReduceTensorDescriptor_t *reduceTensorDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t reduceTensorOp, + cudnnDataType_t reduceTensorCompType, + cudnnNanPropagation_t reduceTensorNanOpt, + cudnnReduceTensorIndices_t reduceTensorIndices, + cudnnIndicesType_t reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnGetReduceTensorDescriptor(const cudnnReduceTensorDescriptor_t reduceTensorDesc, + cudnnReduceTensorOp_t *reduceTensorOp, + cudnnDataType_t *reduceTensorCompType, + cudnnNanPropagation_t *reduceTensorNanOpt, + cudnnReduceTensorIndices_t *reduceTensorIndices, + cudnnIndicesType_t *reduceTensorIndicesType); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyReduceTensorDescriptor(cudnnReduceTensorDescriptor_t reduceTensorDesc); + +/* Helper function to return the minimum size of the index space to be passed to the reduction given the input and + * output tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionIndicesSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Helper function to return the minimum size of the workspace to be passed to the reduction given the input and output + * tensors */ +cudnnStatus_t CUDNNWINAPI +cudnnGetReductionWorkspaceSize(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + const cudnnTensorDescriptor_t aDesc, + const cudnnTensorDescriptor_t cDesc, + size_t *sizeInBytes); + +/* Tensor operation : C = reduce op( alpha * A ) + beta * C */ +/* The NaN propagation enum applies to only the min and max reduce ops; the other reduce ops propagate NaN as usual. */ +/* The indices space is ignored for reduce ops other than min or max. 
*/ +cudnnStatus_t CUDNNWINAPI +cudnnReduceTensor(cudnnHandle_t handle, + const cudnnReduceTensorDescriptor_t reduceTensorDesc, + void *indices, + size_t indicesSizeInBytes, + void *workspace, + size_t workspaceSizeInBytes, + const void *alpha, + const cudnnTensorDescriptor_t aDesc, + const void *A, + const void *beta, + const cudnnTensorDescriptor_t cDesc, + void *C); + +/* Set all values of a tensor to a given value : y[i] = value[0] */ +cudnnStatus_t CUDNNWINAPI +cudnnSetTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *valuePtr); + +/* Scale all values of a tensor by a given factor : y[i] = alpha * y[i] */ +cudnnStatus_t CUDNNWINAPI +cudnnScaleTensor(cudnnHandle_t handle, const cudnnTensorDescriptor_t yDesc, void *y, const void *alpha); + +/* Create an instance of FilterStruct */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateFilterDescriptor(cudnnFilterDescriptor_t *filterDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilter4dDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int k, /* number of output feature maps */ + int c, /* number of input feature maps */ + int h, /* height of each input filter */ + int w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilter4dDescriptor(const cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *k, /* number of output feature maps */ + int *c, /* number of input feature maps */ + int *h, /* height of each input filter */ + int *w); /* width of each input filter */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetFilterNdDescriptor(cudnnFilterDescriptor_t filterDesc, + cudnnDataType_t dataType, /* image data type */ + cudnnTensorFormat_t format, + int nbDims, + const int filterDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterNdDescriptor(const cudnnFilterDescriptor_t filterDesc, + int nbDimsRequested, + cudnnDataType_t *dataType, /* image data type */ + cudnnTensorFormat_t *format, + int *nbDims, + int filterDimA[]); +cudnnStatus_t CUDNNWINAPI +cudnnGetFilterSizeInBytes(const cudnnFilterDescriptor_t filterDesc, size_t *size); + +cudnnStatus_t CUDNNWINAPI +cudnnTransformFilter(cudnnHandle_t handle, + const cudnnTensorTransformDescriptor_t transDesc, + const void *alpha, + const cudnnFilterDescriptor_t srcDesc, + const void *srcData, + const void *beta, + const cudnnFilterDescriptor_t destDesc, + void *destData); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyFilterDescriptor(cudnnFilterDescriptor_t filterDesc); + +/* + * softmax algorithm + */ +typedef enum { + CUDNN_SOFTMAX_FAST = 0, /* straightforward implementation */ + CUDNN_SOFTMAX_ACCURATE = 1, /* subtract max from every point to avoid overflow */ + CUDNN_SOFTMAX_LOG = 2 +} cudnnSoftmaxAlgorithm_t; + +typedef enum { + CUDNN_SOFTMAX_MODE_INSTANCE = 0, /* compute the softmax over all C, H, W for each N */ + CUDNN_SOFTMAX_MODE_CHANNEL = 1 /* compute the softmax over all C for each H, W, N */ +} cudnnSoftmaxMode_t; + +/* Softmax functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxForward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * pooling mode + */ +typedef enum { + CUDNN_POOLING_MAX = 0, + 
CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1, /* count for average includes padded values */ + CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2, /* count for average does not include padded values */ + CUDNN_POOLING_MAX_DETERMINISTIC = 3 +} cudnnPoolingMode_t; + +/* Create an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnCreatePoolingDescriptor(cudnnPoolingDescriptor_t *poolingDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPooling2dDescriptor(cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t mode, + cudnnNanPropagation_t maxpoolingNanOpt, + int windowHeight, + int windowWidth, + int verticalPadding, + int horizontalPadding, + int verticalStride, + int horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *windowHeight, + int *windowWidth, + int *verticalPadding, + int *horizontalPadding, + int *verticalStride, + int *horizontalStride); + +cudnnStatus_t CUDNNWINAPI +cudnnSetPoolingNdDescriptor(cudnnPoolingDescriptor_t poolingDesc, + const cudnnPoolingMode_t mode, + const cudnnNanPropagation_t maxpoolingNanOpt, + int nbDims, + const int windowDimA[], + const int paddingA[], + const int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdDescriptor(const cudnnPoolingDescriptor_t poolingDesc, + int nbDimsRequested, + cudnnPoolingMode_t *mode, + cudnnNanPropagation_t *maxpoolingNanOpt, + int *nbDims, + int windowDimA[], + int paddingA[], + int strideA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPoolingNdForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int nbDims, + int outputTensorDimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnGetPooling2dForwardOutputDim(const cudnnPoolingDescriptor_t poolingDesc, + const cudnnTensorDescriptor_t inputTensorDesc, + int *n, + int *c, + int *h, + int *w); + +/* Destroy an instance of pooling descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyPoolingDescriptor(cudnnPoolingDescriptor_t poolingDesc); + +/* Pooling functions: All of the form "output = alpha * Op(inputs) + beta * output" */ + +/* Function to perform forward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingForward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * activation mode + */ +typedef enum { + CUDNN_ACTIVATION_SIGMOID = 0, + CUDNN_ACTIVATION_RELU = 1, + CUDNN_ACTIVATION_TANH = 2, + CUDNN_ACTIVATION_CLIPPED_RELU = 3, + CUDNN_ACTIVATION_ELU = 4, + CUDNN_ACTIVATION_IDENTITY = 5, + CUDNN_ACTIVATION_SWISH = 6 +} cudnnActivationMode_t; + +/* Activation functions: All of the form "output = alpha * Op(inputs) + beta * output" */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateActivationDescriptor(cudnnActivationDescriptor_t *activationDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptor(cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t mode, + cudnnNanPropagation_t reluNanOpt, + double coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptor(const cudnnActivationDescriptor_t activationDesc, + cudnnActivationMode_t *mode, + cudnnNanPropagation_t *reluNanOpt, + double *coef); /* ceiling for clipped RELU, alpha for ELU */ + +cudnnStatus_t CUDNNWINAPI +cudnnSetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t 
activationDesc, double swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnGetActivationDescriptorSwishBeta(cudnnActivationDescriptor_t activationDesc, double *swish_beta); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyActivationDescriptor(cudnnActivationDescriptor_t activationDesc); + +/* Function to perform forward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationForward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +/* + * Create an instance of LRN (Local Response Normalization) descriptor + * Uses lrnN=5, lrnAlpha=1e-4, lrnBeta=0.75, lrnK=2.0 as defaults from Krizhevsky'12 ImageNet paper + */ +cudnnStatus_t CUDNNWINAPI +cudnnCreateLRNDescriptor(cudnnLRNDescriptor_t *normDesc); + +#define CUDNN_LRN_MIN_N 1 /* minimum allowed lrnN */ +#define CUDNN_LRN_MAX_N 16 /* maximum allowed lrnN */ +#define CUDNN_LRN_MIN_K 1e-5 /* minimum allowed lrnK */ +#define CUDNN_LRN_MIN_BETA 0.01 /* minimum allowed lrnBeta */ + +/* LRN layer mode */ +typedef enum { + CUDNN_LRN_CROSS_CHANNEL_DIM1 = 0, /* Normalize across tensor's dimA[1] dimension */ +} cudnnLRNMode_t; + +/* + * Uses a window [center-lookBehind, center+lookAhead], where + * lookBehind = floor( (lrnN-1)/2 ), lookAhead = lrnN-lookBehind-1. + * Values of double parameters cast to tensor data type. + */ +cudnnStatus_t CUDNNWINAPI +cudnnSetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned lrnN, double lrnAlpha, double lrnBeta, double lrnK); +/* + * Retrieve the settings currently stored in an LRN layer descriptor + * Any of the provided pointers can be NULL (no corresponding value will be returned) + */ +cudnnStatus_t CUDNNWINAPI +cudnnGetLRNDescriptor(cudnnLRNDescriptor_t normDesc, unsigned *lrnN, double *lrnAlpha, double *lrnBeta, double *lrnK); + +/* Destroy an instance of LRN descriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDestroyLRNDescriptor(cudnnLRNDescriptor_t lrnDesc); + +/* LRN functions: output = alpha * normalize(x) + beta * old_y */ + +/* LRN cross-channel forward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + CUDNN_DIVNORM_PRECOMPUTED_MEANS = 0, +} cudnnDivNormMode_t; + +/* LCN/divisive normalization functions: y = alpha * normalize(x) + beta * y */ +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationForward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for means, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t yDesc, + void *y); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_BATCHNORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_BATCHNORM_SPATIAL = 1, + + /* + * bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors). 
+ * May be faster than CUDNN_BATCHNORM_SPATIAL but imposes some limits on the range of values + */ + CUDNN_BATCHNORM_SPATIAL_PERSISTENT = 2, +} cudnnBatchNormMode_t; + +#define CUDNN_BN_MIN_EPSILON 0.0 /* Minimum epsilon allowed to be used in the Batch Normalization formula */ + +/* + * Derives a tensor descriptor from layer data descriptor for BatchNormalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc in Batch Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveBNTensorDescriptor(cudnnTensorDescriptor_t derivedBnDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnBatchNormMode_t mode); + +typedef enum { + CUDNN_BATCHNORM_OPS_BN = 0, /* do batch normalization only */ + CUDNN_BATCHNORM_OPS_BN_ACTIVATION = 1, /* do batchNorm, then activation */ + CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION = 2, /* do batchNorm, then elemWiseAdd, then activation */ +} cudnnBatchNormOps_t; + +/* + * Performs Batch Normalization during Inference: + * y[i] = bnScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + bnBias[k] + * with bnScale, bnBias, runningMean, runningInvVariance tensors indexed + * according to spatial or per-activation mode. Refer to cudnnBatchNormalizationForwardTraining + * above for notes on function arguments. + */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardInference(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + const void *estimatedMean, + const void *estimatedVariance, + double epsilon); + +typedef enum { + /* bnScale, bnBias tensor dims are 1xCxHxWx.. (one value per CHW...-slice, normalized over N slice) */ + CUDNN_NORM_PER_ACTIVATION = 0, + + /* bnScale, bnBias tensor dims are 1xCx1x1 (one value per C-dim normalized over Nx1xHxW subtensors) */ + CUDNN_NORM_PER_CHANNEL = 1, +} cudnnNormMode_t; + +typedef enum { CUDNN_NORM_ALGO_STANDARD = 0, CUDNN_NORM_ALGO_PERSIST = 1 } cudnnNormAlgo_t; + +/* + * Derives a tensor descriptor from layer data descriptor for Normalization + * scale, invVariance, bnBias, bnScale tensors. Use this tensor desc for + * normScaleBiasMeanVarDesc and normScaleBiasDiffDesc in Normalization forward and backward functions. + */ +cudnnStatus_t CUDNNWINAPI +cudnnDeriveNormTensorDescriptor(cudnnTensorDescriptor_t derivedNormScaleBiasDesc, + cudnnTensorDescriptor_t derivedNormMeanVarDesc, + const cudnnTensorDescriptor_t xDesc, + cudnnNormMode_t mode, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +typedef enum { + CUDNN_NORM_OPS_NORM = 0, /* do normalization only */ + CUDNN_NORM_OPS_NORM_ACTIVATION = 1, /* do Norm, then activation */ + CUDNN_NORM_OPS_NORM_ADD_ACTIVATION = 2, /* do Norm, then elemWiseAdd, then activation */ +} cudnnNormOps_t; + +/* + * Performs Normalization during Inference: + * y[i] = normScale[k]*(x[i]-estimatedMean[k])/sqrt(epsilon+estimatedVariance[k]) + normBias[k] + * with normScale, normBias, runningMean, runningInvVariance tensors indexed + * according to per-channel or per-activation mode. Refer to cudnnNormalizationForwardTraining + * above for notes on function arguments. 
+ */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardInference(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + const cudnnTensorDescriptor_t normMeanVarDesc, + const void *estimatedMean, + const void *estimatedVariance, + const cudnnTensorDescriptor_t zDesc, + const void *z, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + double epsilon, + int groupCnt); /* Place hold for future work*/ + +/* APIs for spatial transformer network*/ +typedef enum { + CUDNN_SAMPLER_BILINEAR = 0, +} cudnnSamplerType_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateSpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t *stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSetSpatialTransformerNdDescriptor(cudnnSpatialTransformerDescriptor_t stDesc, + cudnnSamplerType_t samplerType, + cudnnDataType_t dataType, + const int nbDims, + const int dimA[]); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroySpatialTransformerDescriptor(cudnnSpatialTransformerDescriptor_t stDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorForward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *theta, + void *grid); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerForward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *grid, + const void *beta, + cudnnTensorDescriptor_t yDesc, + void *y); + +typedef struct cudnnDropoutStruct *cudnnDropoutDescriptor_t; + +cudnnStatus_t CUDNNWINAPI +cudnnCreateDropoutDescriptor(cudnnDropoutDescriptor_t *dropoutDesc); + +cudnnStatus_t CUDNNWINAPI +cudnnDestroyDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc); + +/*helper function to determine size of the states to be passed to cudnnSetDropoutDescriptor */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetStatesSize(cudnnHandle_t handle, size_t *sizeInBytes); + +/*helper function to determine size of the reserve space to be passed to dropout forward/backward calls */ +cudnnStatus_t CUDNNWINAPI +cudnnDropoutGetReserveSpaceSize(cudnnTensorDescriptor_t xdesc, size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnSetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +/* Restores the dropout descriptor to a previously saved-off state */ +cudnnStatus_t CUDNNWINAPI +cudnnRestoreDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float dropout, + void *states, + size_t stateSizeInBytes, + unsigned long long seed); + +cudnnStatus_t CUDNNWINAPI +cudnnGetDropoutDescriptor(cudnnDropoutDescriptor_t dropoutDesc, + cudnnHandle_t handle, + float *dropout, + void **states, + unsigned long long *seed); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutForward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t xdesc, + const void *x, + const cudnnTensorDescriptor_t ydesc, + void *y, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* TODO: remove */ + +typedef struct cudnnAlgorithmStruct *cudnnAlgorithmDescriptor_t; +typedef 
struct cudnnAlgorithmPerformanceStruct *cudnnAlgorithmPerformance_t; + +/* TODO: move these enums out to the appropriate submodule */ +typedef enum { + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0, + CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1, + CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2, + CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3, + CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4, + CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD = 6, + CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED = 7, + CUDNN_CONVOLUTION_FWD_ALGO_COUNT = 8 +} cudnnConvolutionFwdAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD = 4, /* not implemented */ + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING = 6, + CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT = 7 +} cudnnConvolutionBwdFilterAlgo_t; + +typedef enum { + CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0, /* non-deterministic */ + CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD = 4, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED = 5, + CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT = 6 +} cudnnConvolutionBwdDataAlgo_t; + +typedef enum { + CUDNN_RNN_ALGO_STANDARD = 0, + CUDNN_RNN_ALGO_PERSIST_STATIC = 1, + CUDNN_RNN_ALGO_PERSIST_DYNAMIC = 2, + CUDNN_RNN_ALGO_PERSIST_STATIC_SMALL_H = 3, + CUDNN_RNN_ALGO_COUNT = 4, +} cudnnRNNAlgo_t; + +typedef enum { CUDNN_CTC_LOSS_ALGO_DETERMINISTIC = 0, CUDNN_CTC_LOSS_ALGO_NON_DETERMINISTIC = 1 } cudnnCTCLossAlgo_t; + +/* TODO: remove */ +typedef struct cudnnAlgorithmUnionStruct { + union Algorithm { + cudnnConvolutionFwdAlgo_t convFwdAlgo; + cudnnConvolutionBwdFilterAlgo_t convBwdFilterAlgo; + cudnnConvolutionBwdDataAlgo_t convBwdDataAlgo; + cudnnRNNAlgo_t RNNAlgo; + cudnnCTCLossAlgo_t CTCLossAlgo; + } algo; +} cudnnAlgorithm_t; + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmDescriptor(cudnnAlgorithmDescriptor_t *algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t algoDesc, cudnnAlgorithm_t *algorithm); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCopyAlgorithmDescriptor(const cudnnAlgorithmDescriptor_t src, cudnnAlgorithmDescriptor_t dest); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmDescriptor(cudnnAlgorithmDescriptor_t algoDesc); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnCreateAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToCreate); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSetAlgorithmPerformance(cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t algoDesc, + cudnnStatus_t status, + float time, + size_t memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmPerformance(const cudnnAlgorithmPerformance_t algoPerf, + cudnnAlgorithmDescriptor_t *algoDesc, + cudnnStatus_t *status, + float *time, + size_t *memory); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnDestroyAlgorithmPerformance(cudnnAlgorithmPerformance_t *algoPerf, int numberToDestroy); + +CUDNN_DEPRECATED 
cudnnStatus_t CUDNNWINAPI +cudnnGetAlgorithmSpaceSize(cudnnHandle_t handle, cudnnAlgorithmDescriptor_t algoDesc, size_t *algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnSaveAlgorithm(cudnnHandle_t handle, + cudnnAlgorithmDescriptor_t algoDesc, + void *algoSpace, + size_t algoSpaceSizeInBytes); + +CUDNN_DEPRECATED cudnnStatus_t CUDNNWINAPI +cudnnRestoreAlgorithm(cudnnHandle_t handle, + void *algoSpace, + size_t algoSpaceSizeInBytes, + cudnnAlgorithmDescriptor_t algoDesc); + +typedef enum { + CUDNN_SEV_FATAL = 0, + CUDNN_SEV_ERROR = 1, + CUDNN_SEV_WARNING = 2, + CUDNN_SEV_INFO = 3, +} cudnnSeverity_t; + +/* Message masks to be used with cudnnSetCallback() */ +#define CUDNN_SEV_ERROR_EN (1U << CUDNN_SEV_ERROR) +#define CUDNN_SEV_WARNING_EN (1U << CUDNN_SEV_WARNING) +#define CUDNN_SEV_INFO_EN (1U << CUDNN_SEV_INFO) + +/* struct containing useful informaiton for each API call */ +typedef struct cudnnDebugStruct { + unsigned cudnn_version; + cudnnStatus_t cudnnStatus; + unsigned time_sec; /* epoch time in seconds */ + unsigned time_usec; /* microseconds part of epoch time */ + unsigned time_delta; /* time since start in seconds */ + cudnnHandle_t handle; /* cudnn handle */ + cudaStream_t stream; /* cuda stream ID */ + unsigned long long pid; /* process ID */ + unsigned long long tid; /* thread ID */ + int cudaDeviceId; /* CUDA device ID */ + int reserved[15]; /* reserved for future use */ +} cudnnDebug_t; + +typedef void (*cudnnCallback_t)(cudnnSeverity_t sev, void *udata, const cudnnDebug_t *dbg, const char *msg); + +cudnnStatus_t CUDNNWINAPI +cudnnSetCallback(unsigned mask, void *udata, cudnnCallback_t fptr); + +cudnnStatus_t CUDNNWINAPI +cudnnGetCallback(unsigned *mask, void **udata, cudnnCallback_t *fptr); + +/* + * \brief Cross-library version checker. + * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsInferVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_INFER_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h new file mode 100644 index 0000000000000000000000000000000000000000..425c7c684968d76e1154de76eac082e61ec62f36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train.h @@ -0,0 +1,501 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_train : cuDNN's basic training operations and algorithms. + */ + +#if !defined(CUDNN_OPS_TRAIN_H_) +#define CUDNN_OPS_TRAIN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_TRAIN_MAJOR 8 +#define CUDNN_OPS_TRAIN_MINOR 9 +#define CUDNN_OPS_TRAIN_PATCH 2 + +#if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS TRAIN!!! 
+#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Function to perform backward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxBackward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingBackward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationBackward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* LRN cross-channel backward computation. Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + const void *dy, + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */ + void *dx, /* output x differential */ + void *dMeans); /* output means differential, can be NULL */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes); 
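/*
 * Editorial note: illustrative sketch only, NOT part of the vendored cudnn_ops_train.h.
 * It shows how the three size-query functions declared just above are typically paired
 * with device allocations before calling cudnnBatchNormalizationForwardTrainingEx
 * (declared below). All descriptor arguments (handle, xDesc, yDesc,
 * bnScaleBiasMeanVarDesc) are assumed to have been created and configured by the
 * caller; passing NULL for zDesc/activationDesc assumes plain CUDNN_BATCHNORM_OPS_BN
 * (no fused add or activation). Assumes <cuda_runtime.h> for cudaMalloc; error
 * checking is omitted for brevity.
 */
static cudnnStatus_t
exampleAllocBatchNormTrainingBuffers(cudnnHandle_t handle,
                                     cudnnTensorDescriptor_t xDesc,
                                     cudnnTensorDescriptor_t yDesc,
                                     cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc,
                                     void **workspace, size_t *workspaceBytes,
                                     void **reserveSpace, size_t *reserveBytes)
{
    /* Query how much scratch workspace the fused forward-training call will need. */
    cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
        handle, CUDNN_BATCHNORM_SPATIAL, CUDNN_BATCHNORM_OPS_BN,
        xDesc, NULL /* zDesc: unused for OPS_BN */, yDesc,
        bnScaleBiasMeanVarDesc, NULL /* activationDesc: unused for OPS_BN */,
        workspaceBytes);

    /* Reserve space carries intermediate results from the forward pass to the backward pass. */
    cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
        handle, CUDNN_BATCHNORM_SPATIAL, CUDNN_BATCHNORM_OPS_BN,
        NULL /* activationDesc */, xDesc, reserveBytes);

    if (*workspaceBytes > 0) cudaMalloc(workspace, *workspaceBytes);
    if (*reserveBytes > 0)   cudaMalloc(reserveSpace, *reserveBytes);

    /* The returned buffers and sizes are then passed to
       cudnnBatchNormalizationForwardTrainingEx and cudnnBatchNormalizationBackwardEx. */
    return CUDNN_STATUS_SUCCESS;
}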
+ +/* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTraining( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + + /* Shared desc for the next 6 tensors in the argument list. + Data type to be set as follows: + type = (typeOf(x) == double) ? double : float + Dimensions for this descriptor depend on normalization mode + - Spatial Normalization : tensors are expected to have dims 1xCx1x1 + (normalization is performed across NxHxW) + - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW + (normalization is performed across N) */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + + /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */ + const void *bnScale, + const void *bnBias, + + /* MUST use factor=1 in the very first call of a complete training cycle. + Use a factor=1/(1+n) at N-th call to the function to get + Cumulative Moving Average (CMA) behavior + CMA[n] = (x[1]+...+x[n])/n + Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) = + ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) = + CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */ + double exponentialAverageFactor, + + /* Used in Training phase only. + runningMean = newMean*factor + runningMean*(1-factor) */ + void *resultRunningMean, + /* Output in training mode, input in inference. Is the moving average + of variance[x] (factor is applied in the same way as for runningMean) */ + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance); + +/* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTrainingEx( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + + double exponentialAverageFactor, + void *resultRunningMean, + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + + cudnnActivationDescriptor_t activationDesc, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* Performs backward pass of Batch Normalization layer. 
Returns x gradient, +* bnScale gradient and bnBias gradient */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackward(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */ + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScale, /* bnBias doesn't affect backpropagation */ + /* scale and bias diff are not backpropagated below this layer */ + void *dBnScaleResult, + void *dBnBiasResult, + /* Same epsilon as forward pass */ + double epsilon, + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance); + +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScaleData, + const void *bnBiasData, /* needed if there is activation */ + void *dBnScaleData, + void *dBnBiasData, + double epsilon, /* Same epsilon as forward pass */ + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for 
future work, should be set to 1 now*/ + +/* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardTraining(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + double exponentialAverageFactor, + const cudnnTensorDescriptor_t normMeanVarDesc, + void *resultRunningMean, + void *resultRunningVariance, + /* Has to be >= 0. Should be the same in forward and backward functions. */ + double epsilon, + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationBackward(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const void *normScaleData, + const void *normBiasData, /* needed if there is activation */ + void *dNormScaleData, + void *dNormBiasData, + double epsilon, /* Same epsilon as forward pass */ + const cudnnTensorDescriptor_t normMeanVarDesc, + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *dgrid, + void *dtheta); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerBackward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const void *alphaDgrid, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *grid, + const void *betaDgrid, + void *dgrid); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutBackward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t dydesc, + const void *dy, + const cudnnTensorDescriptor_t dxdesc, + void *dx, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* + * \brief Cross-library version checker. 
+ * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_TRAIN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..425c7c684968d76e1154de76eac082e61ec62f36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_ops_train_v8.h @@ -0,0 +1,501 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * cudnn_ops_train : cuDNN's basic training operations and algorithms. 
+ */ + +#if !defined(CUDNN_OPS_TRAIN_H_) +#define CUDNN_OPS_TRAIN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" + +/* These version numbers are autogenerated, do not edit manually. */ +#define CUDNN_OPS_TRAIN_MAJOR 8 +#define CUDNN_OPS_TRAIN_MINOR 9 +#define CUDNN_OPS_TRAIN_PATCH 2 + +#if (CUDNN_OPS_TRAIN_MAJOR != CUDNN_MAJOR) || (CUDNN_OPS_TRAIN_MINOR != CUDNN_MINOR) || \ + (CUDNN_OPS_TRAIN_PATCH != CUDNN_PATCHLEVEL) +#error Version mismatch in cuDNN OPS TRAIN!!! +#endif + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Function to perform backward softmax */ +cudnnStatus_t CUDNNWINAPI +cudnnSoftmaxBackward(cudnnHandle_t handle, + cudnnSoftmaxAlgorithm_t algo, + cudnnSoftmaxMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward pooling */ +cudnnStatus_t CUDNNWINAPI +cudnnPoolingBackward(cudnnHandle_t handle, + const cudnnPoolingDescriptor_t poolingDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* Function to perform backward activation */ +cudnnStatus_t CUDNNWINAPI +cudnnActivationBackward(cudnnHandle_t handle, + cudnnActivationDescriptor_t activationDesc, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +/* LRN cross-channel backward computation. 
Double parameters cast to tensor data type */ +cudnnStatus_t CUDNNWINAPI +cudnnLRNCrossChannelBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnLRNMode_t lrnMode, + const void *alpha, + const cudnnTensorDescriptor_t yDesc, + const void *y, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx); + +cudnnStatus_t CUDNNWINAPI +cudnnDivisiveNormalizationBackward(cudnnHandle_t handle, + cudnnLRNDescriptor_t normDesc, + cudnnDivNormMode_t mode, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, means, dy, temp, temp2 */ + const void *x, + const void *means, /* if NULL, means are assumed to be zero */ + const void *dy, + void *temp, + void *temp2, + const void *beta, + const cudnnTensorDescriptor_t dXdMeansDesc, /* same desc for dx, dMeans */ + void *dx, /* output x differential */ + void *dMeans); /* output means differential, can be NULL */ + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationBackwardExWorkspaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + size_t *sizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetBatchNormalizationTrainingExReserveSpaceSize(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes); + +/* Computes y = BN(x). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTraining( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *x, /* NxCxHxW */ + const cudnnTensorDescriptor_t yDesc, + void *y, /* NxCxHxW */ + + /* Shared desc for the next 6 tensors in the argument list. + Data type to be set as follows: + type = (typeOf(x) == double) ? double : float + Dimensions for this descriptor depend on normalization mode + - Spatial Normalization : tensors are expected to have dims 1xCx1x1 + (normalization is performed across NxHxW) + - Per-Activation Normalization : tensors are expected to have dims of 1xCxHxW + (normalization is performed across N) */ + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + + /* 'Gamma' and 'Beta' respectively in Ioffe and Szegedy's paper's notation */ + const void *bnScale, + const void *bnBias, + + /* MUST use factor=1 in the very first call of a complete training cycle. 
+ Use a factor=1/(1+n) at N-th call to the function to get + Cumulative Moving Average (CMA) behavior + CMA[n] = (x[1]+...+x[n])/n + Since CMA[n+1] = (n*CMA[n]+x[n+1])/(n+1) = + ((n+1)*CMA[n]-CMA[n])/(n+1) + x[n+1]/(n+1) = + CMA[n]*(1-1/(n+1)) + x[n+1]*1/(n+1) */ + double exponentialAverageFactor, + + /* Used in Training phase only. + runningMean = newMean*factor + runningMean*(1-factor) */ + void *resultRunningMean, + /* Output in training mode, input in inference. Is the moving average + of variance[x] (factor is applied in the same way as for runningMean) */ + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance); + +/* Computes y = relu(BN(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationForwardTrainingEx( + cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + + const cudnnTensorDescriptor_t bnScaleBiasMeanVarDesc, + const void *bnScale, + const void *bnBias, + + double exponentialAverageFactor, + void *resultRunningMean, + void *resultRunningVariance, + + /* Has to be >= CUDNN_BN_MIN_EPSILON. Should be the same in forward and backward functions. */ + double epsilon, + + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + + cudnnActivationDescriptor_t activationDesc, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* Performs backward pass of Batch Normalization layer. 
Returns x gradient, +* bnScale gradient and bnBias gradient */ +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackward(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, /* same desc for x, dx, dy */ + const void *x, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScale, /* bnBias doesn't affect backpropagation */ + /* scale and bias diff are not backpropagated below this layer */ + void *dBnScaleResult, + void *dBnBiasResult, + /* Same epsilon as forward pass */ + double epsilon, + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance); + +cudnnStatus_t CUDNNWINAPI +cudnnBatchNormalizationBackwardEx(cudnnHandle_t handle, + cudnnBatchNormMode_t mode, + cudnnBatchNormOps_t bnOps, + + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dBnScaleBiasDesc, + const void *bnScaleData, + const void *bnBiasData, /* needed if there is activation */ + void *dBnScaleData, + void *dBnBiasData, + double epsilon, /* Same epsilon as forward pass */ + + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationForwardTrainingWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t zDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationBackwardWorkspaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnTensorDescriptor_t xDesc, + const cudnnTensorDescriptor_t yDesc, + const cudnnTensorDescriptor_t dyDesc, + const cudnnTensorDescriptor_t dzDesc, + const cudnnTensorDescriptor_t dxDesc, + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t normMeanVarDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnGetNormalizationTrainingReserveSpaceSize(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t xDesc, + size_t *sizeInBytes, + int groupCnt); /* Place hold for 
future work, should be set to 1 now*/ + +/* Computes y = relu(Norm(x) + z). Also accumulates moving averages of mean and inverse variances */ +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationForwardTraining(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alpha, /* alpha[0] = result blend factor */ + const void *beta, /* beta[0] = dest layer blend factor */ + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t normScaleBiasDesc, + const void *normScale, + const void *normBias, + double exponentialAverageFactor, + const cudnnTensorDescriptor_t normMeanVarDesc, + void *resultRunningMean, + void *resultRunningVariance, + /* Has to be >= 0. Should be the same in forward and backward functions. */ + double epsilon, + /* Optionally save intermediate results from the forward pass here + - can be reused to speed up backward pass. NULL if unused */ + void *resultSaveMean, + void *resultSaveInvVariance, + cudnnActivationDescriptor_t activationDesc, + const cudnnTensorDescriptor_t zDesc, + const void *zData, + const cudnnTensorDescriptor_t yDesc, + void *yData, + void *workspace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnNormalizationBackward(cudnnHandle_t handle, + cudnnNormMode_t mode, + cudnnNormOps_t normOps, + cudnnNormAlgo_t algo, + const void *alphaDataDiff, + const void *betaDataDiff, + const void *alphaParamDiff, + const void *betaParamDiff, + const cudnnTensorDescriptor_t xDesc, + const void *xData, + const cudnnTensorDescriptor_t yDesc, + const void *yData, + const cudnnTensorDescriptor_t dyDesc, + const void *dyData, + const cudnnTensorDescriptor_t dzDesc, + void *dzData, + const cudnnTensorDescriptor_t dxDesc, + void *dxData, + /* Shared tensor desc for the 4 tensors below */ + const cudnnTensorDescriptor_t dNormScaleBiasDesc, + const void *normScaleData, + const void *normBiasData, /* needed if there is activation */ + void *dNormScaleData, + void *dNormBiasData, + double epsilon, /* Same epsilon as forward pass */ + const cudnnTensorDescriptor_t normMeanVarDesc, + /* Optionally cached intermediate results from + forward pass */ + const void *savedMean, + const void *savedInvVariance, + cudnnActivationDescriptor_t activationDesc, + void *workSpace, + size_t workSpaceSizeInBytes, + void *reserveSpace, + size_t reserveSpaceSizeInBytes, + int groupCnt); /* Place hold for future work, should be set to 1 now*/ + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfGridGeneratorBackward(cudnnHandle_t handle, + const cudnnSpatialTransformerDescriptor_t stDesc, + const void *dgrid, + void *dtheta); + +cudnnStatus_t CUDNNWINAPI +cudnnSpatialTfSamplerBackward(cudnnHandle_t handle, + cudnnSpatialTransformerDescriptor_t stDesc, + const void *alpha, + const cudnnTensorDescriptor_t xDesc, + const void *x, + const void *beta, + const cudnnTensorDescriptor_t dxDesc, + void *dx, + const void *alphaDgrid, + const cudnnTensorDescriptor_t dyDesc, + const void *dy, + const void *grid, + const void *betaDgrid, + void *dgrid); + +cudnnStatus_t CUDNNWINAPI +cudnnDropoutBackward(cudnnHandle_t handle, + const cudnnDropoutDescriptor_t dropoutDesc, + const cudnnTensorDescriptor_t dydesc, + const void *dy, + const cudnnTensorDescriptor_t dxdesc, + void *dx, + void *reserveSpace, + size_t reserveSpaceSizeInBytes); + +/* + * \brief Cross-library version checker. 
+ * This function is implemented differently in each sub-library. Each sublib + * checks whether its own version matches that of its dependencies. + * \returns CUDNN_STATUS_SUCCESS if the version check passes, + * CUDNN_STATUS_VERSION_MISMATCH if the versions are inconsistent. + */ +cudnnStatus_t CUDNNWINAPI +cudnnOpsTrainVersionCheck(void); + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_OPS_TRAIN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..1fcf41a697cb5e6bee6d3697d54a2fe0eafdc168 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_v8.h @@ -0,0 +1,78 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +/* cudnn : Neural Networks Library + +*/ + +#if !defined(CUDNN_H_) +#define CUDNN_H_ + +#include +#include + +#include "cudnn_version.h" +#include "cudnn_ops_infer.h" +#include "cudnn_ops_train.h" +#include "cudnn_adv_infer.h" +#include "cudnn_adv_train.h" +#include "cudnn_cnn_infer.h" +#include "cudnn_cnn_train.h" + +#include "cudnn_backend.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +#if defined(__cplusplus) +} +#endif + +#endif /* CUDNN_H_ */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h new file mode 100644 index 0000000000000000000000000000000000000000..71f2211173bd3ce300999343daf8229b247fe49f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version.h @@ -0,0 +1,109 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/** + * \file: The master cuDNN version file. 
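+ *
+ * Illustrative note (not part of the original file): CUDNN_VERSION defined below is
+ * CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL, so cuDNN 8.9.2 yields 8902.
+ * A compile-time guard in user code might therefore look like:
+ *
+ *   #if CUDNN_VERSION < 8902
+ *   #error "cuDNN 8.9.2 or newer is required"
+ *   #endif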
+ */ + +#ifndef CUDNN_VERSION_H_ +#define CUDNN_VERSION_H_ + +#define CUDNN_MAJOR 8 +#define CUDNN_MINOR 9 +#define CUDNN_PATCHLEVEL 2 + +#define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL) + +/* cannot use constexpr here since this is a C-only file */ +/* Below is the max SM version this cuDNN library is aware of and supports natively */ + +#define CUDNN_MAX_SM_MAJOR_NUMBER 9 +#define CUDNN_MAX_SM_MINOR_NUMBER 0 +#define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10) + +/* Here are constants for each of the SM Architectures we support to use in code where device version checks must be + * made */ + +/* MAXWELL SM 50 52 53 */ +#define CUDNN_SM_50 500 +#define CUDNN_SM_52 520 +#define CUDNN_SM_53 530 + +/* PASCAL SM 60 61 62 */ +#define CUDNN_SM_60 600 +#define CUDNN_SM_61 610 +#define CUDNN_SM_62 620 + +/* VOLTA SM 70 72 */ +#define CUDNN_SM_70 700 +#define CUDNN_SM_72 720 + +/* TURING SM 75 */ +#define CUDNN_SM_75 750 + +/* AMPERE SM 80 86 87 */ +#define CUDNN_SM_80 800 +#define CUDNN_SM_86 860 +#define CUDNN_SM_87 870 + +/* ADA LOVELACE SM 89 */ +#define CUDNN_SM_89 890 + +/* HOPPER SM 90 */ +#define CUDNN_SM_90 900 + +/* END MARKER for last known version. + * This can be replaced after support for 1000 is added + */ +#define CUDNN_SM_9X_END 999 + +/* This is the minimum version we support devices below this will return CUDNN_STATUS_ARCH_MISMATCH */ +#define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50 + +#endif /* CUDNN_VERSION_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h new file mode 100644 index 0000000000000000000000000000000000000000..71f2211173bd3ce300999343daf8229b247fe49f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/include/cudnn_version_v8.h @@ -0,0 +1,109 @@ +/* + * Copyright 2014-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/** + * \file: The master cuDNN version file. + */ + +#ifndef CUDNN_VERSION_H_ +#define CUDNN_VERSION_H_ + +#define CUDNN_MAJOR 8 +#define CUDNN_MINOR 9 +#define CUDNN_PATCHLEVEL 2 + +#define CUDNN_VERSION (CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL) + +/* cannot use constexpr here since this is a C-only file */ +/* Below is the max SM version this cuDNN library is aware of and supports natively */ + +#define CUDNN_MAX_SM_MAJOR_NUMBER 9 +#define CUDNN_MAX_SM_MINOR_NUMBER 0 +#define CUDNN_MAX_DEVICE_VERSION (CUDNN_MAX_SM_MAJOR_NUMBER * 100 + CUDNN_MAX_SM_MINOR_NUMBER * 10) + +/* Here are constants for each of the SM Architectures we support to use in code where device version checks must be + * made */ + +/* MAXWELL SM 50 52 53 */ +#define CUDNN_SM_50 500 +#define CUDNN_SM_52 520 +#define CUDNN_SM_53 530 + +/* PASCAL SM 60 61 62 */ +#define CUDNN_SM_60 600 +#define CUDNN_SM_61 610 +#define CUDNN_SM_62 620 + +/* VOLTA SM 70 72 */ +#define CUDNN_SM_70 700 +#define CUDNN_SM_72 720 + +/* TURING SM 75 */ +#define CUDNN_SM_75 750 + +/* AMPERE SM 80 86 87 */ +#define CUDNN_SM_80 800 +#define CUDNN_SM_86 860 +#define CUDNN_SM_87 870 + +/* ADA LOVELACE SM 89 */ +#define CUDNN_SM_89 890 + +/* HOPPER SM 90 */ +#define CUDNN_SM_90 900 + +/* END MARKER for last known version. 
+ * This can be replaced after support for 1000 is added + */ +#define CUDNN_SM_9X_END 999 + +/* This is the minimum version we support devices below this will return CUDNN_STATUS_ARCH_MISMATCH */ +#define CUDNN_MIN_DEVICE_VERSION CUDNN_SM_50 + +#endif /* CUDNN_VERSION_H */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d38002beff2477e88dfcdc9be603978297517c5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 new file mode 100644 index 0000000000000000000000000000000000000000..e93ea0e9530b4537f02974fb16bd40b003984e03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn.so.8 differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e292577bef2159b7ff4c1e8fa75f2fc3c34b9e2d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand.h new file mode 100644 index 0000000000000000000000000000000000000000..f475df38abbfa4b3c02eb0c6d709dfdc71e946b1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand.h @@ -0,0 +1,1077 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. 
+ * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#if !defined(CURAND_H_) +#define CURAND_H_ + +/** + * \defgroup HOST Host API + * + * @{ + */ +#ifndef __CUDACC_RTC__ +#include +#endif + +#ifndef CURANDAPI +#ifdef _WIN32 +#define CURANDAPI __stdcall +#else +#define CURANDAPI +#endif +#endif + +#if defined(__cplusplus) +extern "C" { +#endif /* __cplusplus */ + +#define CURAND_VER_MAJOR 10 +#define CURAND_VER_MINOR 3 +#define CURAND_VER_PATCH 2 +#define CURAND_VER_BUILD 106 +#define CURAND_VERSION (CURAND_VER_MAJOR * 1000 + \ + CURAND_VER_MINOR * 100 + \ + CURAND_VER_PATCH) +/* CURAND Host API datatypes */ + +/** + * @{ + */ + +/** + * CURAND function call status types + */ +enum curandStatus { + CURAND_STATUS_SUCCESS = 0, ///< No errors + CURAND_STATUS_VERSION_MISMATCH = 100, ///< Header file and linked library version do not match + CURAND_STATUS_NOT_INITIALIZED = 101, ///< Generator not initialized + CURAND_STATUS_ALLOCATION_FAILED = 102, ///< Memory allocation failed + CURAND_STATUS_TYPE_ERROR = 103, ///< Generator is wrong type + CURAND_STATUS_OUT_OF_RANGE = 104, ///< Argument out of range + CURAND_STATUS_LENGTH_NOT_MULTIPLE = 105, ///< Length requested is not a multple of dimension + CURAND_STATUS_DOUBLE_PRECISION_REQUIRED = 106, ///< GPU does not have double precision required by MRG32k3a + CURAND_STATUS_LAUNCH_FAILURE = 201, ///< Kernel launch failure + CURAND_STATUS_PREEXISTING_FAILURE = 202, ///< Preexisting failure on library entry + CURAND_STATUS_INITIALIZATION_FAILED = 203, ///< Initialization of CUDA failed + CURAND_STATUS_ARCH_MISMATCH = 204, ///< Architecture mismatch, GPU does not support requested feature + CURAND_STATUS_INTERNAL_ERROR = 999 ///< Internal library error +}; + +/* + * CURAND function call status types +*/ +/** \cond UNHIDE_TYPEDEFS */ +typedef enum curandStatus curandStatus_t; +/** \endcond */ + 
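+
+/* Illustrative sketch (not part of the original header): host code commonly wraps each
+ * cuRAND call in a check against the curandStatus_t values above. The CURAND_CHECK name
+ * below is only an example (it also assumes <stdio.h> and <stdlib.h>), not an API
+ * provided by this header:
+ *
+ *   #define CURAND_CHECK(call)                                   \
+ *       do {                                                     \
+ *           curandStatus_t s_ = (call);                          \
+ *           if (s_ != CURAND_STATUS_SUCCESS) {                   \
+ *               fprintf(stderr, "cuRAND error %d\n", (int)s_);   \
+ *               abort();                                         \
+ *           }                                                    \
+ *       } while (0)
+ */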
+/** + * CURAND generator types + */ +enum curandRngType { + CURAND_RNG_TEST = 0, + CURAND_RNG_PSEUDO_DEFAULT = 100, ///< Default pseudorandom generator + CURAND_RNG_PSEUDO_XORWOW = 101, ///< XORWOW pseudorandom generator + CURAND_RNG_PSEUDO_MRG32K3A = 121, ///< MRG32k3a pseudorandom generator + CURAND_RNG_PSEUDO_MTGP32 = 141, ///< Mersenne Twister MTGP32 pseudorandom generator + CURAND_RNG_PSEUDO_MT19937 = 142, ///< Mersenne Twister MT19937 pseudorandom generator + CURAND_RNG_PSEUDO_PHILOX4_32_10 = 161, ///< PHILOX-4x32-10 pseudorandom generator + CURAND_RNG_QUASI_DEFAULT = 200, ///< Default quasirandom generator + CURAND_RNG_QUASI_SOBOL32 = 201, ///< Sobol32 quasirandom generator + CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 = 202, ///< Scrambled Sobol32 quasirandom generator + CURAND_RNG_QUASI_SOBOL64 = 203, ///< Sobol64 quasirandom generator + CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 = 204 ///< Scrambled Sobol64 quasirandom generator +}; + +/* + * CURAND generator types + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef enum curandRngType curandRngType_t; +/** \endcond */ + +/** + * CURAND ordering of results in memory + */ +enum curandOrdering { + CURAND_ORDERING_PSEUDO_BEST = 100, ///< Best ordering for pseudorandom results + CURAND_ORDERING_PSEUDO_DEFAULT = 101, ///< Specific default thread sequence for pseudorandom results, same as CURAND_ORDERING_PSEUDO_BEST + CURAND_ORDERING_PSEUDO_SEEDED = 102, ///< Specific seeding pattern for fast lower quality pseudorandom results + CURAND_ORDERING_PSEUDO_LEGACY = 103, ///< Specific legacy sequence for pseudorandom results, guaranteed to remain the same for all cuRAND release + CURAND_ORDERING_PSEUDO_DYNAMIC = 104, ///< Specific ordering adjusted to the device it is being executed on, provides the best performance + CURAND_ORDERING_QUASI_DEFAULT = 201 ///< Specific n-dimensional ordering for quasirandom results +}; + +/* + * CURAND ordering of results in memory + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef enum curandOrdering curandOrdering_t; +/** \endcond */ + +/** + * CURAND choice of direction vector set + */ +enum curandDirectionVectorSet { + CURAND_DIRECTION_VECTORS_32_JOEKUO6 = 101, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions + CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 = 102, ///< Specific set of 32-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions, and scrambled + CURAND_DIRECTION_VECTORS_64_JOEKUO6 = 103, ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. Kuo, for up to 20,000 dimensions + CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 = 104 ///< Specific set of 64-bit direction vectors generated from polynomials recommended by S. Joe and F. Y. 
Kuo, for up to 20,000 dimensions, and scrambled +}; + +/* + * CURAND choice of direction vector set + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef enum curandDirectionVectorSet curandDirectionVectorSet_t; +/** \endcond */ + +/** + * CURAND array of 32-bit direction vectors + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef unsigned int curandDirectionVectors32_t[32]; +/** \endcond */ + + /** + * CURAND array of 64-bit direction vectors + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef unsigned long long curandDirectionVectors64_t[64]; +/** \endcond **/ + +/** + * CURAND generator (opaque) + */ +struct curandGenerator_st; + +/** + * CURAND generator + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandGenerator_st *curandGenerator_t; +/** \endcond */ + +/** + * CURAND distribution + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef double curandDistribution_st; +typedef curandDistribution_st *curandDistribution_t; +typedef struct curandDistributionShift_st *curandDistributionShift_t; +/** \endcond */ +/** + * CURAND distribution M2 + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandDistributionM2Shift_st *curandDistributionM2Shift_t; +typedef struct curandHistogramM2_st *curandHistogramM2_t; +typedef unsigned int curandHistogramM2K_st; +typedef curandHistogramM2K_st *curandHistogramM2K_t; +typedef curandDistribution_st curandHistogramM2V_st; +typedef curandHistogramM2V_st *curandHistogramM2V_t; + +typedef struct curandDiscreteDistribution_st *curandDiscreteDistribution_t; +/** \endcond */ + +/* + * CURAND METHOD + */ +/** \cond UNHIDE_ENUMS */ +enum curandMethod { + CURAND_CHOOSE_BEST = 0, // choose best depends on args + CURAND_ITR = 1, + CURAND_KNUTH = 2, + CURAND_HITR = 3, + CURAND_M1 = 4, + CURAND_M2 = 5, + CURAND_BINARY_SEARCH = 6, + CURAND_DISCRETE_GAUSS = 7, + CURAND_REJECTION = 8, + CURAND_DEVICE_API = 9, + CURAND_FAST_REJECTION = 10, + CURAND_3RD = 11, + CURAND_DEFINITION = 12, + CURAND_POISSON = 13 +}; + +typedef enum curandMethod curandMethod_t; +/** \endcond */ + + +#ifndef __CUDACC_RTC__ + +/** + * @} + */ + +/** + * \brief Create new random number generator. + * + * Creates a new random number generator of type \p rng_type + * and returns it in \p *generator. + * + * Legal values for \p rng_type are: + * - CURAND_RNG_PSEUDO_DEFAULT + * - CURAND_RNG_PSEUDO_XORWOW + * - CURAND_RNG_PSEUDO_MRG32K3A + * - CURAND_RNG_PSEUDO_MTGP32 + * - CURAND_RNG_PSEUDO_MT19937 + * - CURAND_RNG_PSEUDO_PHILOX4_32_10 + * - CURAND_RNG_QUASI_DEFAULT + * - CURAND_RNG_QUASI_SOBOL32 + * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 + * - CURAND_RNG_QUASI_SOBOL64 + * - CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 + * + * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen + * is CURAND_RNG_PSEUDO_XORWOW. \n + * When \p rng_type is CURAND_RNG_QUASI_DEFAULT, + * the type chosen is CURAND_RNG_QUASI_SOBOL32. 
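+ *
+ * A minimal end-to-end sketch (illustration only; \p d_out is assumed to be a device
+ * buffer of \p n unsigned ints allocated with cudaMalloc, and error checking is omitted):
+ *
+ *   curandGenerator_t gen;
+ *   curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10);
+ *   curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
+ *   curandGenerate(gen, d_out, n);
+ *   curandDestroyGenerator(gen);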
+ * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBBLED_SOBOL32 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * \param generator - Pointer to generator + * \param rng_type - Type of generator to create + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED, if memory could not be allocated \n + * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n + * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the + * dynamically linked library version \n + * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n + * - CURAND_STATUS_SUCCESS if generator was created successfully \n + * + */ +curandStatus_t CURANDAPI +curandCreateGenerator(curandGenerator_t *generator, curandRngType_t rng_type); + +/** + * \brief Create new host CPU random number generator. + * + * Creates a new host CPU random number generator of type \p rng_type + * and returns it in \p *generator. + * + * Legal values for \p rng_type are: + * - CURAND_RNG_PSEUDO_DEFAULT + * - CURAND_RNG_PSEUDO_XORWOW + * - CURAND_RNG_PSEUDO_MRG32K3A + * - CURAND_RNG_PSEUDO_MTGP32 + * - CURAND_RNG_PSEUDO_MT19937 + * - CURAND_RNG_PSEUDO_PHILOX4_32_10 + * - CURAND_RNG_QUASI_DEFAULT + * - CURAND_RNG_QUASI_SOBOL32 + * + * When \p rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen + * is CURAND_RNG_PSEUDO_XORWOW. \n + * When \p rng_type is CURAND_RNG_QUASI_DEFAULT, + * the type chosen is CURAND_RNG_QUASI_SOBOL32. 
+ * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_XORWOW are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MRG32K3A are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MTGP32 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_PSEUDO_MT19937 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * * The default values for \p rng_type = CURAND_RNG_PSEUDO_PHILOX4_32_10 are: + * - \p seed = 0 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_PSEUDO_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL32 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SOBOL64 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL32 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * The default values for \p rng_type = CURAND_RNG_QUASI_SCRAMBLED_SOBOL64 are: + * - \p dimensions = 1 + * - \p offset = 0 + * - \p ordering = CURAND_ORDERING_QUASI_DEFAULT + * + * \param generator - Pointer to generator + * \param rng_type - Type of generator to create + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n + * - CURAND_STATUS_VERSION_MISMATCH if the header file version does not match the + * dynamically linked library version \n + * - CURAND_STATUS_TYPE_ERROR if the value for \p rng_type is invalid \n + * - CURAND_STATUS_SUCCESS if generator was created successfully \n + */ +curandStatus_t CURANDAPI +curandCreateGeneratorHost(curandGenerator_t *generator, curandRngType_t rng_type); + +/** + * \brief Destroy an existing generator. + * + * Destroy an existing generator and free all memory associated with its state. + * + * \param generator - Generator to destroy + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_SUCCESS if generator was destroyed successfully \n + */ +curandStatus_t CURANDAPI +curandDestroyGenerator(curandGenerator_t generator); + +/** + * \brief Return the version number of the library. + * + * Return in \p *version the version number of the dynamically linked CURAND + * library. The format is the same as CUDART_VERSION from the CUDA Runtime. + * The only supported configuration is CURAND version equal to CUDA Runtime + * version. + * + * \param version - CURAND library version + * + * \return + * - CURAND_STATUS_SUCCESS if the version number was successfully returned \n + */ +curandStatus_t CURANDAPI +curandGetVersion(int *version); + +/** +* \brief Return the value of the curand property. +* +* Return in \p *value the number for the property described by \p type of the +* dynamically linked CURAND library. 
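+*
+* Illustrative call (assumption: MAJOR_VERSION comes from the CUDA libraryPropertyType
+* enum declared in library_types.h; it is not defined in this header):
+*
+*   int major = 0;
+*   curandGetProperty(MAJOR_VERSION, &major);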
+* +* \param type - CUDA library property +* \param value - integer value for the requested property +* +* \return +* - CURAND_STATUS_SUCCESS if the property value was successfully returned \n +* - CURAND_STATUS_OUT_OF_RANGE if the property type is not recognized \n +*/ +curandStatus_t CURANDAPI +curandGetProperty(libraryPropertyType type, int *value); + + +/** + * \brief Set the current stream for CURAND kernel launches. + * + * Set the current stream for CURAND kernel launches. All library functions + * will use this stream until set again. + * + * \param generator - Generator to modify + * \param stream - Stream to use or ::NULL for null stream + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_SUCCESS if stream was set successfully \n + */ +curandStatus_t CURANDAPI +curandSetStream(curandGenerator_t generator, cudaStream_t stream); + +/** + * \brief Set the seed value of the pseudo-random number generator. + * + * Set the seed value of the pseudorandom number generator. + * All values of seed are valid. Different seeds will produce different sequences. + * Different seeds will often not be statistically correlated with each other, + * but some pairs of seed values may generate sequences which are statistically correlated. + * + * \param generator - Generator to modify + * \param seed - Seed value + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_TYPE_ERROR if the generator is not a pseudorandom number generator \n + * - CURAND_STATUS_SUCCESS if generator seed was set successfully \n + */ +curandStatus_t CURANDAPI +curandSetPseudoRandomGeneratorSeed(curandGenerator_t generator, unsigned long long seed); + +/** + * \brief Set the absolute offset of the pseudo or quasirandom number generator. + * + * Set the absolute offset of the pseudo or quasirandom number generator. + * + * All values of offset are valid. The offset position is absolute, not + * relative to the current position in the sequence. + * + * \param generator - Generator to modify + * \param offset - Absolute offset position + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_SUCCESS if generator offset was set successfully \n + */ +curandStatus_t CURANDAPI +curandSetGeneratorOffset(curandGenerator_t generator, unsigned long long offset); + +/** + * \brief Set the ordering of results of the pseudo or quasirandom number generator. + * + * Set the ordering of results of the pseudo or quasirandom number generator. + * + * Legal values of \p order for pseudorandom generators are: + * - CURAND_ORDERING_PSEUDO_DEFAULT + * - CURAND_ORDERING_PSEUDO_BEST + * - CURAND_ORDERING_PSEUDO_SEEDED + * - CURAND_ORDERING_PSEUDO_LEGACY + * + * Legal values of \p order for quasirandom generators are: + * - CURAND_ORDERING_QUASI_DEFAULT + * + * \param generator - Generator to modify + * \param order - Ordering of results + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_OUT_OF_RANGE if the ordering is not valid \n + * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n + */ +curandStatus_t CURANDAPI +curandSetGeneratorOrdering(curandGenerator_t generator, curandOrdering_t order); + +/** + * \brief Set the number of dimensions. + * + * Set the number of dimensions to be generated by the quasirandom number + * generator. + * + * Legal values for \p num_dimensions are 1 to 20000. 
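+ *
+ * A minimal setup sketch, assuming a 3-dimensional Sobol32 sequence is
+ * wanted (the dimension count is an arbitrary example):
+ *
+ * \code
+ * curandGenerator_t gen;
+ * curandCreateGenerator(&gen, CURAND_RNG_QUASI_SOBOL32);
+ * curandSetQuasiRandomGeneratorDimensions(gen, 3);
+ * \endcode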
+ * + * \param generator - Generator to modify + * \param num_dimensions - Number of dimensions + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_OUT_OF_RANGE if num_dimensions is not valid \n + * - CURAND_STATUS_TYPE_ERROR if the generator is not a quasirandom number generator \n + * - CURAND_STATUS_SUCCESS if generator ordering was set successfully \n + */ +curandStatus_t CURANDAPI +curandSetQuasiRandomGeneratorDimensions(curandGenerator_t generator, unsigned int num_dimensions); + +/** + * \brief Generate 32-bit pseudo or quasirandom numbers. + * + * Use \p generator to generate \p num 32-bit results into the device memory at + * \p outputPtr. The device memory must have been previously allocated and be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 32-bit values with every bit random. + * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param num - Number of random 32-bit values to generate + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_TYPE_ERROR if the generator is a 64 bit quasirandom generator. + * (use ::curandGenerateLongLong() with 64 bit quasirandom generators) + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerate(curandGenerator_t generator, unsigned int *outputPtr, size_t num); + +/** + * \brief Generate 64-bit quasirandom numbers. + * + * Use \p generator to generate \p num 64-bit results into the device memory at + * \p outputPtr. The device memory must have been previously allocated and be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 64-bit values with every bit random. + * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param num - Number of random 64-bit values to generate + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_TYPE_ERROR if the generator is not a 64 bit quasirandom generator\n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateLongLong(curandGenerator_t generator, unsigned long long *outputPtr, size_t num); + +/** + * \brief Generate uniformly distributed floats. 
+ *
+ * Use \p generator to generate \p num float results into the device memory at
+ * \p outputPtr. The device memory must have been previously allocated and be
+ * large enough to hold all the results. Launches are done with the stream
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
+ *
+ * Results are 32-bit floating point values between \p 0.0f and \p 1.0f,
+ * excluding \p 0.0f and including \p 1.0f.
+ *
+ * \param generator - Generator to use
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
+ * Pointer to host memory to store CPU-generated results
+ * \param num - Number of floats to generate
+ *
+ * \return
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
+ * a previous kernel launch \n
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
+ * not a multiple of the quasirandom dimension \n
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGenerateUniform(curandGenerator_t generator, float *outputPtr, size_t num);
+
+/**
+ * \brief Generate uniformly distributed doubles.
+ *
+ * Use \p generator to generate \p num double results into the device memory at
+ * \p outputPtr. The device memory must have been previously allocated and be
+ * large enough to hold all the results. Launches are done with the stream
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
+ *
+ * Results are 64-bit double precision floating point values between
+ * \p 0.0 and \p 1.0, excluding \p 0.0 and including \p 1.0.
+ *
+ * \param generator - Generator to use
+ * \param outputPtr - Pointer to device memory to store CUDA-generated results, or
+ * Pointer to host memory to store CPU-generated results
+ * \param num - Number of doubles to generate
+ *
+ * \return
+ * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n
+ * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n
+ * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from
+ * a previous kernel launch \n
+ * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n
+ * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is
+ * not a multiple of the quasirandom dimension \n
+ * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n
+ * - CURAND_STATUS_SUCCESS if the results were generated successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGenerateUniformDouble(curandGenerator_t generator, double *outputPtr, size_t num);
+
+/**
+ * \brief Generate normally distributed floats.
+ *
+ * Use \p generator to generate \p n float results into the device memory at
+ * \p outputPtr. The device memory must have been previously allocated and be
+ * large enough to hold all the results. Launches are done with the stream
+ * set using ::curandSetStream(), or the null stream if no stream has been set.
+ *
+ * Results are 32-bit floating point values with mean \p mean and standard
+ * deviation \p stddev.
+ *
+ * Normally distributed results are generated from pseudorandom generators
+ * with a Box-Muller transform, and so require \p n to be even.
+ * Quasirandom generators use an inverse cumulative distribution + * function to preserve dimensionality. + * + * There may be slight numerical differences between results generated + * on the GPU with generators created with ::curandCreateGenerator() + * and results calculated on the CPU with generators created with + * ::curandCreateGeneratorHost(). These differences arise because of + * differences in results for transcendental functions. In addition, + * future versions of CURAND may use newer versions of the CUDA math + * library, so different versions of CURAND may give slightly different + * numerical values. + * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param n - Number of floats to generate + * \param mean - Mean of normal distribution + * \param stddev - Standard deviation of normal distribution + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension, or is not a multiple + * of two for pseudorandom generators \n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateNormal(curandGenerator_t generator, float *outputPtr, + size_t n, float mean, float stddev); + +/** + * \brief Generate normally distributed doubles. + * + * Use \p generator to generate \p n double results into the device memory at + * \p outputPtr. The device memory must have been previously allocated and be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 64-bit floating point values with mean \p mean and standard + * deviation \p stddev. + * + * Normally distributed results are generated from pseudorandom generators + * with a Box-Muller transform, and so require \p n to be even. + * Quasirandom generators use an inverse cumulative distribution + * function to preserve dimensionality. + * + * There may be slight numerical differences between results generated + * on the GPU with generators created with ::curandCreateGenerator() + * and results calculated on the CPU with generators created with + * ::curandCreateGeneratorHost(). These differences arise because of + * differences in results for transcendental functions. In addition, + * future versions of CURAND may use newer versions of the CUDA math + * library, so different versions of CURAND may give slightly different + * numerical values. 
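+ *
+ * A minimal sketch, assuming a generator \c gen created earlier with
+ * ::curandCreateGenerator() (the buffer size and distribution parameters
+ * below are illustrative):
+ *
+ * \code
+ * double *d_out;
+ * cudaMalloc((void **)&d_out, 1024 * sizeof(double));
+ * curandGenerateNormalDouble(gen, d_out, 1024, 0.0, 1.0);  // n must be even
+ * \endcode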
+ * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param n - Number of doubles to generate + * \param mean - Mean of normal distribution + * \param stddev - Standard deviation of normal distribution + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension, or is not a multiple + * of two for pseudorandom generators \n + * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateNormalDouble(curandGenerator_t generator, double *outputPtr, + size_t n, double mean, double stddev); + +/** + * \brief Generate log-normally distributed floats. + * + * Use \p generator to generate \p n float results into the device memory at + * \p outputPtr. The device memory must have been previously allocated and be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 32-bit floating point values with log-normal distribution based on + * an associated normal distribution with mean \p mean and standard deviation \p stddev. + * + * Normally distributed results are generated from pseudorandom generators + * with a Box-Muller transform, and so require \p n to be even. + * Quasirandom generators use an inverse cumulative distribution + * function to preserve dimensionality. + * The normally distributed results are transformed into log-normal distribution. + * + * There may be slight numerical differences between results generated + * on the GPU with generators created with ::curandCreateGenerator() + * and results calculated on the CPU with generators created with + * ::curandCreateGeneratorHost(). These differences arise because of + * differences in results for transcendental functions. In addition, + * future versions of CURAND may use newer versions of the CUDA math + * library, so different versions of CURAND may give slightly different + * numerical values. 
+ * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param n - Number of floats to generate + * \param mean - Mean of associated normal distribution + * \param stddev - Standard deviation of associated normal distribution + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension, or is not a multiple + * of two for pseudorandom generators \n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateLogNormal(curandGenerator_t generator, float *outputPtr, + size_t n, float mean, float stddev); + +/** + * \brief Generate log-normally distributed doubles. + * + * Use \p generator to generate \p n double results into the device memory at + * \p outputPtr. The device memory must have been previously allocated and be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 64-bit floating point values with log-normal distribution based on + * an associated normal distribution with mean \p mean and standard deviation \p stddev. + * + * Normally distributed results are generated from pseudorandom generators + * with a Box-Muller transform, and so require \p n to be even. + * Quasirandom generators use an inverse cumulative distribution + * function to preserve dimensionality. + * The normally distributed results are transformed into log-normal distribution. + * + * There may be slight numerical differences between results generated + * on the GPU with generators created with ::curandCreateGenerator() + * and results calculated on the CPU with generators created with + * ::curandCreateGeneratorHost(). These differences arise because of + * differences in results for transcendental functions. In addition, + * future versions of CURAND may use newer versions of the CUDA math + * library, so different versions of CURAND may give slightly different + * numerical values. 
+ * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param n - Number of doubles to generate + * \param mean - Mean of normal distribution + * \param stddev - Standard deviation of normal distribution + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension, or is not a multiple + * of two for pseudorandom generators \n + * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateLogNormalDouble(curandGenerator_t generator, double *outputPtr, + size_t n, double mean, double stddev); + +/** + * \brief Construct the histogram array for a Poisson distribution. + * + * Construct the histogram array for the Poisson distribution with lambda \p lambda. + * For lambda greater than 2000, an approximation with a normal distribution is used. + * + * \param lambda - lambda for the Poisson distribution + * + * + * \param discrete_distribution - pointer to the histogram in device memory + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU does not support double precision \n + * - CURAND_STATUS_INITIALIZATION_FAILED if there was a problem setting up the GPU \n + * - CURAND_STATUS_NOT_INITIALIZED if the distribution pointer was null \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n + * - CURAND_STATUS_SUCCESS if the histogram was generated successfully \n + */ + +curandStatus_t CURANDAPI +curandCreatePoissonDistribution(double lambda, curandDiscreteDistribution_t *discrete_distribution); + + + +/** + * \brief Destroy the histogram array for a discrete distribution (e.g. Poisson). + * + * Destroy the histogram array for a discrete distribution created by curandCreatePoissonDistribution. + * + * \param discrete_distribution - pointer to device memory where the histogram is stored + * + * \return + * - CURAND_STATUS_NOT_INITIALIZED if the histogram was never created \n + * - CURAND_STATUS_SUCCESS if the histogram was destroyed successfully \n + */ +curandStatus_t CURANDAPI +curandDestroyDistribution(curandDiscreteDistribution_t discrete_distribution); + + +/** + * \brief Generate Poisson-distributed unsigned ints. + * + * Use \p generator to generate \p n unsigned int results into device memory at + * \p outputPtr. The device memory must have been previously allocated and must be + * large enough to hold all the results. Launches are done with the stream + * set using ::curandSetStream(), or the null stream if no stream has been set. + * + * Results are 32-bit unsigned int point values with Poisson distribution, with lambda \p lambda. 
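+ *
+ * A minimal sketch, assuming a generator \c gen created earlier with
+ * ::curandCreateGenerator() (the sample count and lambda below are
+ * illustrative):
+ *
+ * \code
+ * unsigned int *d_out;
+ * cudaMalloc((void **)&d_out, 4096 * sizeof(unsigned int));
+ * curandGeneratePoisson(gen, d_out, 4096, 4.5);
+ * \endcode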
+ * + * \param generator - Generator to use + * \param outputPtr - Pointer to device memory to store CUDA-generated results, or + * Pointer to host memory to store CPU-generated results + * \param n - Number of unsigned ints to generate + * \param lambda - lambda for the Poisson distribution + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_LENGTH_NOT_MULTIPLE if the number of output samples is + * not a multiple of the quasirandom dimension\n + * - CURAND_STATUS_DOUBLE_PRECISION_REQUIRED if the GPU or sm does not support double precision \n + * - CURAND_STATUS_OUT_OF_RANGE if lambda is non-positive or greater than 400,000 \n + * - CURAND_STATUS_SUCCESS if the results were generated successfully \n + */ + +curandStatus_t CURANDAPI +curandGeneratePoisson(curandGenerator_t generator, unsigned int *outputPtr, + size_t n, double lambda); +// just for internal usage +curandStatus_t CURANDAPI +curandGeneratePoissonMethod(curandGenerator_t generator, unsigned int *outputPtr, + size_t n, double lambda, curandMethod_t method); + + +curandStatus_t CURANDAPI +curandGenerateBinomial(curandGenerator_t generator, unsigned int *outputPtr, + size_t num, unsigned int n, double p); +// just for internal usage +curandStatus_t CURANDAPI +curandGenerateBinomialMethod(curandGenerator_t generator, + unsigned int *outputPtr, + size_t num, unsigned int n, double p, + curandMethod_t method); + + +/** + * \brief Setup starting states. + * + * Generate the starting state of the generator. This function is + * automatically called by generation functions such as + * ::curandGenerate() and ::curandGenerateUniform(). + * It can be called manually for performance testing reasons to separate + * timings for starting state generation and random number generation. + * + * \param generator - Generator to update + * + * \return + * - CURAND_STATUS_ALLOCATION_FAILED if memory could not be allocated \n + * - CURAND_STATUS_NOT_INITIALIZED if the generator was never created \n + * - CURAND_STATUS_PREEXISTING_FAILURE if there was an existing error from + * a previous kernel launch \n + * - CURAND_STATUS_LAUNCH_FAILURE if the kernel launch failed for any reason \n + * - CURAND_STATUS_SUCCESS if the seeds were generated successfully \n + */ +curandStatus_t CURANDAPI +curandGenerateSeeds(curandGenerator_t generator); + +/** + * \brief Get direction vectors for 32-bit quasirandom number generation. + * + * Get a pointer to an array of direction vectors that can be used + * for quasirandom number generation. The resulting pointer will + * reference an array of direction vectors in host memory. + * + * The array contains vectors for many dimensions. Each dimension + * has 32 vectors. Each individual vector is an unsigned int. 
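+ *
+ * A minimal sketch using one of the vector sets listed below (the variable
+ * name is illustrative):
+ *
+ * \code
+ * curandDirectionVectors32_t *h_vectors;
+ * curandGetDirectionVectors32(&h_vectors, CURAND_DIRECTION_VECTORS_32_JOEKUO6);
+ * // h_vectors[dim][i] is the i-th of the 32 vectors for dimension dim
+ * \endcode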
+ *
+ * Legal values for \p set are:
+ * - CURAND_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6 (20,000 dimensions)
+ *
+ * \param vectors - Address of pointer in which to return direction vectors
+ * \param set - Which set of direction vectors to use
+ *
+ * \return
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGetDirectionVectors32(curandDirectionVectors32_t *vectors[], curandDirectionVectorSet_t set);
+
+/**
+ * \brief Get scramble constants for 32-bit scrambled Sobol'.
+ *
+ * Get a pointer to an array of scramble constants that can be used
+ * for quasirandom number generation. The resulting pointer will
+ * reference an array of unsigned ints in host memory.
+ *
+ * The array contains constants for many dimensions. Each dimension
+ * has a single unsigned int constant.
+ *
+ * \param constants - Address of pointer in which to return scramble constants
+ *
+ * \return
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGetScrambleConstants32(unsigned int * * constants);
+
+/**
+ * \brief Get direction vectors for 64-bit quasirandom number generation.
+ *
+ * Get a pointer to an array of direction vectors that can be used
+ * for quasirandom number generation. The resulting pointer will
+ * reference an array of direction vectors in host memory.
+ *
+ * The array contains vectors for many dimensions. Each dimension
+ * has 64 vectors. Each individual vector is an unsigned long long.
+ *
+ * Legal values for \p set are:
+ * - CURAND_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
+ * - CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6 (20,000 dimensions)
+ *
+ * \param vectors - Address of pointer in which to return direction vectors
+ * \param set - Which set of direction vectors to use
+ *
+ * \return
+ * - CURAND_STATUS_OUT_OF_RANGE if the choice of set is invalid \n
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGetDirectionVectors64(curandDirectionVectors64_t *vectors[], curandDirectionVectorSet_t set);
+
+/**
+ * \brief Get scramble constants for 64-bit scrambled Sobol'.
+ *
+ * Get a pointer to an array of scramble constants that can be used
+ * for quasirandom number generation. The resulting pointer will
+ * reference an array of unsigned long longs in host memory.
+ *
+ * The array contains constants for many dimensions. Each dimension
+ * has a single unsigned long long constant.
+ *
+ * \param constants - Address of pointer in which to return scramble constants
+ *
+ * \return
+ * - CURAND_STATUS_SUCCESS if the pointer was set successfully \n
+ */
+curandStatus_t CURANDAPI
+curandGetScrambleConstants64(unsigned long long * * constants);
+
+/** @} */
+
+#endif // __CUDACC_RTC__
+
+#if defined(__cplusplus)
+}
+#endif /* __cplusplus */
+
+
+#endif /* !defined(CURAND_H_) */
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e194487a0e2ec02abcb1dd8634c42141a148d84
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete.h
@@ -0,0 +1,87 @@
+ /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved.
+ * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + +#if !defined(CURANDDISCRETE_H_) +#define CURANDDISCRETE_H_ + +struct curandDistributionShift_st { + curandDistribution_t probability; + curandDistribution_t host_probability; + unsigned int shift; + unsigned int length; + unsigned int host_gen; +}; + +struct curandHistogramM2_st { + curandHistogramM2V_t V; + curandHistogramM2V_t host_V; + curandHistogramM2K_t K; + curandHistogramM2K_t host_K; + unsigned int host_gen; +}; + + +struct curandDistributionM2Shift_st { + curandHistogramM2_t histogram; + curandHistogramM2_t host_histogram; + unsigned int shift; + unsigned int length; + unsigned int host_gen; +}; + +struct curandDiscreteDistribution_st { + curandDiscreteDistribution_t self_host_ptr; + curandDistributionM2Shift_t M2; + curandDistributionM2Shift_t host_M2; + double stddev; + double mean; + curandMethod_t method; + unsigned int host_gen; +}; + +#endif // !defined(CURANDDISCRETE_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h new file mode 100644 index 0000000000000000000000000000000000000000..ac827749840488f66d71f492c14dbec80b57a3d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_discrete2.h @@ -0,0 +1,253 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. 
Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#if !defined(CURAND_DISCRETE_H_) +#define CURAND_DISCRETE_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#ifndef __CUDACC_RTC__ +#include +#endif // __CUDACC_RTC__ + +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" + + +template +QUALIFIERS unsigned int _curand_discrete(T x, curandDiscreteDistribution_t discrete_distribution){ + if (discrete_distribution->method == CURAND_M2){ + return _curand_M2_double(x, discrete_distribution->M2); + } + return (unsigned int)((discrete_distribution->stddev * _curand_normal_icdf_double(x)) + discrete_distribution->mean + 0.5); +} + + +template +QUALIFIERS unsigned int curand__discrete(STATE state, curandDiscreteDistribution_t discrete_distribution){ + if (discrete_distribution->method == CURAND_M2){ + return curand_M2_double(state, discrete_distribution->M2); + } + return (unsigned int)((discrete_distribution->stddev * curand_normal_double(state)) + discrete_distribution->mean + 0.5); //Round to nearest +} + +template +QUALIFIERS uint4 curand__discrete4(STATE state, curandDiscreteDistribution_t discrete_distribution){ + if (discrete_distribution->method == CURAND_M2){ + return curand_M2_double4(state, discrete_distribution->M2); + } + double4 _res; + uint4 result; + _res = curand_normal4_double(state); + result.x = (unsigned int)((discrete_distribution->stddev * _res.x) + discrete_distribution->mean + 0.5); //Round to nearest + result.y = (unsigned int)((discrete_distribution->stddev * _res.y) + discrete_distribution->mean + 0.5); //Round to nearest + result.z = (unsigned int)((discrete_distribution->stddev * _res.z) + discrete_distribution->mean + 0.5); //Round to nearest + result.w = (unsigned int)((discrete_distribution->stddev * _res.w) + discrete_distribution->mean + 0.5); //Round to nearest + return result; +} + +/* + * \brief Return a discrete distributed unsigned int from a XORWOW generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the XORWOW generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateXORWOW_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a Philox4_32_10 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. 
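+ *
+ * A minimal device-side sketch, assuming per-thread Philox states and a
+ * histogram built earlier with curandCreatePoissonDistribution() (the kernel
+ * and variable names are illustrative):
+ *
+ *     __global__ void draw(curandStatePhilox4_32_10_t *states,
+ *                          curandDiscreteDistribution_t dist,
+ *                          unsigned int *out)
+ *     {
+ *         int id = blockIdx.x * blockDim.x + threadIdx.x;
+ *         out[id] = curand_discrete(&states[id], dist);
+ *     }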
+ */ +QUALIFIERS unsigned int curand_discrete(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return four discrete distributed unsigned ints from a Philox4_32_10 generator. + * + * Return four single discrete distributed unsigned ints derived from a + * distribution defined by \p discrete_distribution from the Philox4_32_10 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS uint4 curand_discrete4(curandStatePhilox4_32_10_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete4(state, discrete_distribution); +} +/* + * \brief Return a discrete distributed unsigned int from a MRG32k3a generator. + * + * Re turn a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the MRG32k3a generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateMRG32k3a_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a MTGP32 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the MTGP32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateMtgp32_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a Sobol32 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateSobol32_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a scrambled Sobol32 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the scrambled Sobol32 generator in \p state, + * increment position of generator by one. 
+ * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol32_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a Sobol64 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateSobol64_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +/* + * \brief Return a discrete distributed unsigned int from a scrambled Sobol64 generator. + * + * Return a single discrete distributed unsigned int derived from a + * distribution defined by \p discrete_distribution from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param discrete_distribution - ancillary structure for discrete distribution + * + * \return unsigned int distributed by distribution defined by \p discrete_distribution. + */ +QUALIFIERS unsigned int curand_discrete(curandStateScrambledSobol64_t *state, curandDiscreteDistribution_t discrete_distribution) +{ + return curand__discrete(state, discrete_distribution); +} + +#endif // !defined(CURAND_DISCRETE_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c9d27c5f271cae931091c85ecfe37b5e7d427bf3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_kernel.h @@ -0,0 +1,1677 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. 
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#if !defined(CURAND_KERNEL_H_) +#define CURAND_KERNEL_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#if !defined(QUALIFIERS) +#define QUALIFIERS static __forceinline__ __device__ +#endif + +/* To prevent unused parameter warnings */ +#if !defined(GCC_UNUSED_PARAMETER) +#if defined(__GNUC__) +#define GCC_UNUSED_PARAMETER __attribute__((unused)) +#else +#define GCC_UNUSED_PARAMETER +#endif /* defined(__GNUC__) */ +#endif /* !defined(GCC_UNUSED_PARAMETER) */ + +#include + +#ifdef __CUDACC_RTC__ +#define CURAND_DETAIL_USE_CUDA_STL +#endif + +#if __cplusplus >= 201103L +# ifdef CURAND_DETAIL_USE_CUDA_STL +# define CURAND_STD cuda::std +# include +# else +# define CURAND_STD std +# include +# endif // CURAND_DETAIL_USE_CUDA_STL +#else +// To support C++03 compilation +# define CURAND_STD curand_detail +namespace curand_detail { + template + struct enable_if {}; + + template + struct enable_if { typedef T type; }; + + template + struct is_same { static const bool value = false; }; + + template + struct is_same { static const bool value = true; }; +} // namespace curand_detail +#endif // __cplusplus >= 201103L + +#ifndef __CUDACC_RTC__ +#include +#endif // __CUDACC_RTC__ + +#include "curand.h" +#include "curand_discrete.h" +#include "curand_precalc.h" +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" +#include "curand_globals.h" + +/* Test RNG */ +/* This generator uses the formula: + x_n = x_(n-1) + 1 mod 2^32 + x_0 = (unsigned int)seed * 3 + Subsequences are spaced 31337 steps apart. +*/ +struct curandStateTest { + unsigned int v; +}; + +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateTest curandStateTest_t; +/** \endcond */ + +/* XORSHIFT FAMILY RNGs */ +/* These generators are a family proposed by Marsaglia. They keep state + in 32 bit chunks, then use repeated shift and xor operations to scramble + the bits. The following generators are a combination of a simple Weyl + generator with an N variable XORSHIFT generator. 
+*/ + +/* XORSHIFT RNG */ +/* This generator uses the xorwow formula of +www.jstatsoft.org/v08/i14/paper page 5 +Has period 2^192 - 2^32. +*/ +/** + * CURAND XORWOW state + */ +struct curandStateXORWOW; + +/* + * Implementation details not in reference documentation */ +struct curandStateXORWOW { + unsigned int d, v[5]; + int boxmuller_flag; + int boxmuller_flag_double; + float boxmuller_extra; + double boxmuller_extra_double; +}; + +/* + * CURAND XORWOW state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateXORWOW curandStateXORWOW_t; + +#define EXTRA_FLAG_NORMAL 0x00000001 +#define EXTRA_FLAG_LOG_NORMAL 0x00000002 +/** \endcond */ + +/* Combined Multiple Recursive Generators */ +/* These generators are a family proposed by L'Ecuyer. They keep state + in sets of doubles, then use repeated modular arithmetic multiply operations + to scramble the bits in each set, and combine the result. +*/ + +/* MRG32k3a RNG */ +/* This generator uses the MRG32k3A formula of +http://www.iro.umontreal.ca/~lecuyer/myftp/streams00/c++/streams4.pdf +Has period 2^191. +*/ + +/* moduli for the recursions */ +/** \cond UNHIDE_DEFINES */ +#define MRG32K3A_MOD1 4294967087. +#define MRG32K3A_MOD2 4294944443. + +/* Constants used in generation */ + +#define MRG32K3A_A12 1403580. +#define MRG32K3A_A13N 810728. +#define MRG32K3A_A21 527612. +#define MRG32K3A_A23N 1370589. +#define MRG32K3A_NORM (2.3283065498378288e-10) +// +// #define MRG32K3A_BITS_NORM ((double)((POW32_DOUBLE-1.0)/MOD1)) +// above constant, used verbatim, rounds differently on some host systems. +#define MRG32K3A_BITS_NORM 1.000000048662 + +/** \endcond */ + + + + +/** + * CURAND MRG32K3A state + */ +struct curandStateMRG32k3a; + +/* Implementation details not in reference documentation */ +struct curandStateMRG32k3a { + unsigned int s1[3]; + unsigned int s2[3]; + int boxmuller_flag; + int boxmuller_flag_double; + float boxmuller_extra; + double boxmuller_extra_double; +}; + +/* + * CURAND MRG32K3A state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateMRG32k3a curandStateMRG32k3a_t; +/** \endcond */ + +/* SOBOL QRNG */ +/** + * CURAND Sobol32 state + */ +struct curandStateSobol32; + +/* Implementation details not in reference documentation */ +struct curandStateSobol32 { + unsigned int i, x, c; + unsigned int direction_vectors[32]; +}; + +/* + * CURAND Sobol32 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateSobol32 curandStateSobol32_t; +/** \endcond */ + +/** + * CURAND Scrambled Sobol32 state + */ +struct curandStateScrambledSobol32; + +/* Implementation details not in reference documentation */ +struct curandStateScrambledSobol32 { + unsigned int i, x, c; + unsigned int direction_vectors[32]; +}; + +/* + * CURAND Scrambled Sobol32 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateScrambledSobol32 curandStateScrambledSobol32_t; +/** \endcond */ + +/** + * CURAND Sobol64 state + */ +struct curandStateSobol64; + +/* Implementation details not in reference documentation */ +struct curandStateSobol64 { + unsigned long long i, x, c; + unsigned long long direction_vectors[64]; +}; + +/* + * CURAND Sobol64 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateSobol64 curandStateSobol64_t; +/** \endcond */ + +/** + * CURAND Scrambled Sobol64 state + */ +struct curandStateScrambledSobol64; + +/* Implementation details not in reference documentation */ +struct curandStateScrambledSobol64 { + unsigned long long i, x, c; + unsigned long long direction_vectors[64]; +}; + +/* + * 
CURAND Scrambled Sobol64 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateScrambledSobol64 curandStateScrambledSobol64_t; +/** \endcond */ + +/* + * Default RNG + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateXORWOW curandState_t; +typedef struct curandStateXORWOW curandState; +/** \endcond */ + +/****************************************************************************/ +/* Utility functions needed by RNGs */ +/****************************************************************************/ +/** \cond UNHIDE_UTILITIES */ +/* + multiply vector by matrix, store in result + matrix is n x n, measured in 32 bit units + matrix is stored in row major order + vector and result cannot be same pointer +*/ +template +QUALIFIERS void __curand_matvec_inplace(unsigned int *vector, unsigned int *matrix) +{ + unsigned int result[N] = { 0 }; + for(int i = 0; i < N; i++) { + #ifdef __CUDA_ARCH__ + #pragma unroll 16 + #endif + for(int j = 0; j < 32; j++) { + if(vector[i] & (1 << j)) { + for(int k = 0; k < N; k++) { + result[k] ^= matrix[N * (i * 32 + j) + k]; + } + } + } + } + for(int i = 0; i < N; i++) { + vector[i] = result[i]; + } +} + +QUALIFIERS void __curand_matvec(unsigned int *vector, unsigned int *matrix, + unsigned int *result, int n) +{ + for(int i = 0; i < n; i++) { + result[i] = 0; + } + for(int i = 0; i < n; i++) { + for(int j = 0; j < 32; j++) { + if(vector[i] & (1 << j)) { + for(int k = 0; k < n; k++) { + result[k] ^= matrix[n * (i * 32 + j) + k]; + } + } + } + } +} + +/* generate identity matrix */ +QUALIFIERS void __curand_matidentity(unsigned int *matrix, int n) +{ + int r; + for(int i = 0; i < n * 32; i++) { + for(int j = 0; j < n; j++) { + r = i & 31; + if(i / 32 == j) { + matrix[i * n + j] = (1 << r); + } else { + matrix[i * n + j] = 0; + } + } + } +} + +/* multiply matrixA by matrixB, store back in matrixA + matrixA and matrixB must not be same matrix */ +QUALIFIERS void __curand_matmat(unsigned int *matrixA, unsigned int *matrixB, int n) +{ + unsigned int result[MAX_XOR_N]; + for(int i = 0; i < n * 32; i++) { + __curand_matvec(matrixA + i * n, matrixB, result, n); + for(int j = 0; j < n; j++) { + matrixA[i * n + j] = result[j]; + } + } +} + +/* copy vectorA to vector */ +QUALIFIERS void __curand_veccopy(unsigned int *vector, unsigned int *vectorA, int n) +{ + for(int i = 0; i < n; i++) { + vector[i] = vectorA[i]; + } +} + +/* copy matrixA to matrix */ +QUALIFIERS void __curand_matcopy(unsigned int *matrix, unsigned int *matrixA, int n) +{ + for(int i = 0; i < n * n * 32; i++) { + matrix[i] = matrixA[i]; + } +} + +/* compute matrixA to power p, store result in matrix */ +QUALIFIERS void __curand_matpow(unsigned int *matrix, unsigned int *matrixA, + unsigned long long p, int n) +{ + unsigned int matrixR[MAX_XOR_N * MAX_XOR_N * 32]; + unsigned int matrixS[MAX_XOR_N * MAX_XOR_N * 32]; + __curand_matidentity(matrix, n); + __curand_matcopy(matrixR, matrixA, n); + while(p) { + if(p & 1) { + __curand_matmat(matrix, matrixR, n); + } + __curand_matcopy(matrixS, matrixR, n); + __curand_matmat(matrixR, matrixS, n); + p >>= 1; + } +} + +/****************************************************************************/ +/* Utility functions needed by MRG32k3a RNG */ +/* Matrix operations modulo some integer less than 2**32, done in */ +/* double precision floating point, with care not to overflow 53 bits */ +/****************************************************************************/ + +/* return i mod m. 
*/ +/* assumes i and m are integers represented accurately in doubles */ + +QUALIFIERS double curand_MRGmod(double i, double m) +{ + double quo; + double rem; + quo = floor(i/m); + rem = i - (quo*m); + if (rem < 0.0) rem += m; + return rem; +} + +/* Multiplication modulo m. Inputs i and j less than 2**32 */ +/* Ensure intermediate results do not exceed 2**53 */ + +QUALIFIERS double curand_MRGmodMul(double i, double j, double m) +{ + double tempHi; + double tempLo; + + tempHi = floor(i/131072.0); + tempLo = i - (tempHi*131072.0); + tempLo = curand_MRGmod( curand_MRGmod( (tempHi * j), m) * 131072.0 + curand_MRGmod(tempLo * j, m),m); + + if (tempLo < 0.0) tempLo += m; + return tempLo; +} + +/* multiply 3 by 3 matrices of doubles, modulo m */ + +QUALIFIERS void curand_MRGmatMul3x3(unsigned int i1[][3],unsigned int i2[][3],unsigned int o[][3],double m) +{ + int i,j; + double temp[3][3]; + for (i=0; i<3; i++){ + for (j=0; j<3; j++){ + temp[i][j] = ( curand_MRGmodMul(i1[i][0], i2[0][j], m) + + curand_MRGmodMul(i1[i][1], i2[1][j], m) + + curand_MRGmodMul(i1[i][2], i2[2][j], m)); + temp[i][j] = curand_MRGmod( temp[i][j], m ); + } + } + for (i=0; i<3; i++){ + for (j=0; j<3; j++){ + o[i][j] = (unsigned int)temp[i][j]; + } + } +} + +/* multiply 3 by 3 matrix times 3 by 1 vector of doubles, modulo m */ + +QUALIFIERS void curand_MRGmatVecMul3x3( unsigned int i[][3], unsigned int v[], double m) +{ + int k; + double t[3]; + for (k = 0; k < 3; k++) { + t[k] = ( curand_MRGmodMul(i[k][0], v[0], m) + + curand_MRGmodMul(i[k][1], v[1], m) + + curand_MRGmodMul(i[k][2], v[2], m) ); + t[k] = curand_MRGmod( t[k], m ); + } + for (k = 0; k < 3; k++) { + v[k] = (unsigned int)t[k]; + } + +} + +/* raise a 3 by 3 matrix of doubles to a 64 bit integer power pow, modulo m */ +/* input is index zero of an array of 3 by 3 matrices m, */ +/* each m = m[0]**(2**index) */ + +QUALIFIERS void curand_MRGmatPow3x3( unsigned int in[][3][3], unsigned int o[][3], double m, unsigned long long pow ) +{ + int i,j; + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + o[i][j] = 0; + if ( i == j ) o[i][j] = 1; + } + } + i = 0; + curand_MRGmatVecMul3x3(o,o[0],m); + while (pow) { + if ( pow & 1ll ) { + curand_MRGmatMul3x3(in[i], o, o, m); + } + i++; + pow >>= 1; + } +} + +/* raise a 3 by 3 matrix of doubles to the power */ +/* 2 to the power (pow modulo 191), modulo m */ + +QUALIFIERS void curnand_MRGmatPow2Pow3x3( double in[][3], double o[][3], double m, unsigned long pow ) +{ + unsigned int temp[3][3]; + int i,j; + pow = pow % 191; + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + temp[i][j] = (unsigned int)in[i][j]; + } + } + while (pow) { + curand_MRGmatMul3x3(temp, temp, temp, m); + pow--; + } + for ( i = 0; i < 3; i++ ) { + for ( j = 0; j < 3; j++ ) { + o[i][j] = temp[i][j]; + } + } +} + +/** \endcond */ + +/****************************************************************************/ +/* Kernel implementations of RNGs */ +/****************************************************************************/ + +/* Test RNG */ + +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateTest_t *state) +{ + state->v = (unsigned int)(seed * 3) + (unsigned int)(subsequence * 31337) + \ + (unsigned int)offset; +} + + +QUALIFIERS unsigned int curand(curandStateTest_t *state) +{ + unsigned int r = state->v++; + return r; +} + +QUALIFIERS void skipahead(unsigned long long n, curandStateTest_t *state) +{ + state->v += (unsigned int)n; +} + +/* XORWOW RNG */ + 
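+/* A minimal device-side sketch of the XORWOW state lifecycle (the kernel and
+   variable names are illustrative):
+
+       __global__ void setup(curandStateXORWOW_t *states, unsigned long long seed)
+       {
+           int id = blockIdx.x * blockDim.x + threadIdx.x;
+           curand_init(seed, id, 0, &states[id]);   // one subsequence per thread
+       }
+
+       __global__ void draw(curandStateXORWOW_t *states, unsigned int *out)
+       {
+           int id = blockIdx.x * blockDim.x + threadIdx.x;
+           out[id] = curand(&states[id]);           // raw 32-bit sample
+       }
+*/
+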
+template +QUALIFIERS void __curand_generate_skipahead_matrix_xor(unsigned int matrix[]) +{ + T state; + // Generate matrix that advances one step + // matrix has n * n * 32 32-bit elements + // solve for matrix by stepping single bit states + for(int i = 0; i < 32 * n; i++) { + state.d = 0; + for(int j = 0; j < n; j++) { + state.v[j] = 0; + } + state.v[i / 32] = (1 << (i & 31)); + curand(&state); + for(int j = 0; j < n; j++) { + matrix[i * n + j] = state.v[j]; + } + } +} + +template +QUALIFIERS void _skipahead_scratch(unsigned long long x, T *state, unsigned int *scratch) +{ + // unsigned int matrix[n * n * 32]; + unsigned int *matrix = scratch; + // unsigned int matrixA[n * n * 32]; + unsigned int *matrixA = scratch + (n * n * 32); + // unsigned int vector[n]; + unsigned int *vector = scratch + (n * n * 32) + (n * n * 32); + // unsigned int result[n]; + unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n; + unsigned long long p = x; + for(int i = 0; i < n; i++) { + vector[i] = state->v[i]; + } + int matrix_num = 0; + while(p && (matrix_num < PRECALC_NUM_MATRICES - 1)) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec(vector, precalc_xorwow_offset_matrix[matrix_num], result, n); +, + __curand_matvec(vector, precalc_xorwow_offset_matrix_host[matrix_num], result, n); +) + __curand_veccopy(vector, result, n); + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + if(p) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matcopy(matrix, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_offset_matrix[PRECALC_NUM_MATRICES - 1], n); +, + __curand_matcopy(matrix, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_offset_matrix_host[PRECALC_NUM_MATRICES - 1], n); +) + } + while(p) { + for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) { + __curand_matvec(vector, matrixA, result, n); + __curand_veccopy(vector, result, n); + } + p >>= SKIPAHEAD_BLOCKSIZE; + if(p) { + for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) { + __curand_matmat(matrix, matrixA, n); + __curand_matcopy(matrixA, matrix, n); + } + } + } + for(int i = 0; i < n; i++) { + state->v[i] = vector[i]; + } + state->d += 362437 * (unsigned int)x; +} + +template +QUALIFIERS void _skipahead_sequence_scratch(unsigned long long x, T *state, unsigned int *scratch) +{ + // unsigned int matrix[n * n * 32]; + unsigned int *matrix = scratch; + // unsigned int matrixA[n * n * 32]; + unsigned int *matrixA = scratch + (n * n * 32); + // unsigned int vector[n]; + unsigned int *vector = scratch + (n * n * 32) + (n * n * 32); + // unsigned int result[n]; + unsigned int *result = scratch + (n * n * 32) + (n * n * 32) + n; + unsigned long long p = x; + for(int i = 0; i < n; i++) { + vector[i] = state->v[i]; + } + int matrix_num = 0; + while(p && matrix_num < PRECALC_NUM_MATRICES - 1) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec(vector, precalc_xorwow_matrix[matrix_num], result, n); +, + __curand_matvec(vector, precalc_xorwow_matrix_host[matrix_num], result, n); +) + __curand_veccopy(vector, result, n); + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + if(p) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matcopy(matrix, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_matrix[PRECALC_NUM_MATRICES - 1], n); +, + __curand_matcopy(matrix, 
precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n); + __curand_matcopy(matrixA, precalc_xorwow_matrix_host[PRECALC_NUM_MATRICES - 1], n); +) + } + while(p) { + for(unsigned int t = 0; t < (p & SKIPAHEAD_MASK); t++) { + __curand_matvec(vector, matrixA, result, n); + __curand_veccopy(vector, result, n); + } + p >>= SKIPAHEAD_BLOCKSIZE; + if(p) { + for(int i = 0; i < SKIPAHEAD_BLOCKSIZE; i++) { + __curand_matmat(matrix, matrixA, n); + __curand_matcopy(matrixA, matrix, n); + } + } + } + for(int i = 0; i < n; i++) { + state->v[i] = vector[i]; + } + /* No update of state->d needed, guaranteed to be a multiple of 2^32 */ +} + +template +QUALIFIERS void _skipahead_inplace(const unsigned long long x, T *state) +{ + unsigned long long p = x; + int matrix_num = 0; + while(p) { + for(unsigned int t = 0; t < (p & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec_inplace(state->v, precalc_xorwow_offset_matrix[matrix_num]); +, + __curand_matvec_inplace(state->v, precalc_xorwow_offset_matrix_host[matrix_num]); +) + } + p >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + state->d += 362437 * (unsigned int)x; +} + +template +QUALIFIERS void _skipahead_sequence_inplace(unsigned long long x, T *state) +{ + int matrix_num = 0; + while(x) { + for(unsigned int t = 0; t < (x & PRECALC_BLOCK_MASK); t++) { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + __curand_matvec_inplace(state->v, precalc_xorwow_matrix[matrix_num]); +, + __curand_matvec_inplace(state->v, precalc_xorwow_matrix_host[matrix_num]); +) + } + x >>= PRECALC_BLOCK_SIZE; + matrix_num++; + } + /* No update of state->d needed, guaranteed to be a multiple of 2^32 */ +} + +/** + * \brief Update XORWOW state to skip \p n elements. + * + * Update the XORWOW state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStateXORWOW_t *state) +{ + _skipahead_inplace(n, state); +} + +/** + * \brief Update XORWOW state to skip ahead \p n subsequences. + * + * Update the XORWOW state in \p state to skip ahead \p n subsequences. Each + * subsequence is \xmlonly267\endxmlonly elements long, so this means the function will skip ahead + * \xmlonly267\endxmlonly * n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. 
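+ *
+ * As an illustrative sketch (with \p tid standing in for a thread's global
+ * index, not a name defined by this header), the same effect as passing a
+ * subsequence number to curand_init() can be obtained by seeding once and
+ * then skipping:
+ *
+ * \code
+ * curandStateXORWOW_t s;
+ * curand_init(1234ULL, 0, 0, &s);   // same seed in every thread
+ * skipahead_sequence(tid, &s);      // move to this thread's subsequence
+ * \endcode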
+ * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateXORWOW_t *state) +{ + _skipahead_sequence_inplace(n, state); +} + +QUALIFIERS void _curand_init_scratch(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state, + unsigned int *scratch) +{ + // Break up seed, apply salt + // Constants are arbitrary nonzero values + unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; + unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; + // Simple multiplication to mix up bits + // Constants are arbitrary odd values + unsigned int t0 = 1099087573UL * s0; + unsigned int t1 = 2591861531UL * s1; + state->d = 6615241 + t1 + t0; + state->v[0] = 123456789UL + t0; + state->v[1] = 362436069UL ^ t0; + state->v[2] = 521288629UL + t1; + state->v[3] = 88675123UL ^ t1; + state->v[4] = 5783321UL + t0; + _skipahead_sequence_scratch(subsequence, state, scratch); + _skipahead_scratch(offset, state, scratch); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +QUALIFIERS void _curand_init_inplace(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state) +{ + // Break up seed, apply salt + // Constants are arbitrary nonzero values + unsigned int s0 = ((unsigned int)seed) ^ 0xaad26b49UL; + unsigned int s1 = (unsigned int)(seed >> 32) ^ 0xf7dcefddUL; + // Simple multiplication to mix up bits + // Constants are arbitrary odd values + unsigned int t0 = 1099087573UL * s0; + unsigned int t1 = 2591861531UL * s1; + state->d = 6615241 + t1 + t0; + state->v[0] = 123456789UL + t0; + state->v[1] = 362436069UL ^ t0; + state->v[2] = 521288629UL + t1; + state->v[3] = 88675123UL ^ t1; + state->v[4] = 5783321UL + t0; + _skipahead_sequence_inplace(subsequence, state); + _skipahead_inplace(offset, state); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +/** + * \brief Initialize XORWOW state. + * + * Initialize XORWOW state in \p state with the given \p seed, \p subsequence, + * and \p offset. + * + * All input values of \p seed, \p subsequence, and \p offset are legal. Large + * values for \p subsequence and \p offset require more computation and so will + * take more time to complete. + * + * A value of 0 for \p seed sets the state to the values of the original + * published version of the \p xorwow algorithm. + * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateXORWOW_t *state) +{ + _curand_init_inplace(seed, subsequence, offset, state); +} + +/** + * \brief Return 32-bits of pseudorandomness from an XORWOW generator. + * + * Return 32-bits of pseudorandomness from the XORWOW generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. 
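+ *
+ * A minimal per-thread usage sketch; the kernel name, seed value, and \p tid
+ * indexing below are illustrative placeholders rather than part of this header:
+ *
+ * \code
+ * __global__ void example_kernel(unsigned int *out, unsigned long long seed)
+ * {
+ *     int tid = blockIdx.x * blockDim.x + threadIdx.x;
+ *     curandStateXORWOW_t state;
+ *     curand_init(seed, tid, 0, &state);   // one subsequence per thread
+ *     out[tid] = curand(&state);           // 32 bits of pseudorandomness
+ * }
+ * \endcode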
+ */ +QUALIFIERS unsigned int curand(curandStateXORWOW_t *state) +{ + unsigned int t; + t = (state->v[0] ^ (state->v[0] >> 2)); + state->v[0] = state->v[1]; + state->v[1] = state->v[2]; + state->v[2] = state->v[3]; + state->v[3] = state->v[4]; + state->v[4] = (state->v[4] ^ (state->v[4] <<4)) ^ (t ^ (t << 1)); + state->d += 362437; + return state->v[4] + state->d; +} + + +/** + * \brief Return 32-bits of pseudorandomness from an Philox4_32_10 generator. + * + * Return 32-bits of pseudorandomness from the Philox4_32_10 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. + */ + +QUALIFIERS unsigned int curand(curandStatePhilox4_32_10_t *state) +{ + // Maintain the invariant: output[STATE] is always "good" and + // is the next value to be returned by curand. + unsigned int ret; + switch(state->STATE++){ + default: + ret = state->output.x; + break; + case 1: + ret = state->output.y; + break; + case 2: + ret = state->output.z; + break; + case 3: + ret = state->output.w; + break; + } + if(state->STATE == 4){ + Philox_State_Incr(state); + state->output = curand_Philox4x32_10(state->ctr,state->key); + state->STATE = 0; + } + return ret; +} + +/** + * \brief Return tuple of 4 32-bit pseudorandoms from a Philox4_32_10 generator. + * + * Return 128 bits of pseudorandomness from the Philox4_32_10 generator in \p state, + * increment position of generator by four. + * + * \param state - Pointer to state to update + * + * \return 128-bits of pseudorandomness as a uint4, all bits valid to use. + */ + +QUALIFIERS uint4 curand4(curandStatePhilox4_32_10_t *state) +{ + uint4 r; + + uint4 tmp = state->output; + Philox_State_Incr(state); + state->output= curand_Philox4x32_10(state->ctr,state->key); + switch(state->STATE){ + case 0: + return tmp; + case 1: + r.x = tmp.y; + r.y = tmp.z; + r.z = tmp.w; + r.w = state->output.x; + break; + case 2: + r.x = tmp.z; + r.y = tmp.w; + r.z = state->output.x; + r.w = state->output.y; + break; + case 3: + r.x = tmp.w; + r.y = state->output.x; + r.z = state->output.y; + r.w = state->output.z; + break; + default: + // NOT possible but needed to avoid compiler warnings + return tmp; + } + return r; +} + +/** + * \brief Update Philox4_32_10 state to skip \p n elements. + * + * Update the Philox4_32_10 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStatePhilox4_32_10_t *state) +{ + state->STATE += (n & 3); + n /= 4; + if( state->STATE > 3 ){ + n += 1; + state->STATE -= 4; + } + Philox_State_Incr(state, n); + state->output = curand_Philox4x32_10(state->ctr,state->key); +} + +/** + * \brief Update Philox4_32_10 state to skip ahead \p n subsequences. + * + * Update the Philox4_32_10 state in \p state to skip ahead \p n subsequences. Each + * subsequence is \xmlonly266\endxmlonly elements long, so this means the function will skip ahead + * \xmlonly266\endxmlonly * n elements. + * + * All values of \p n are valid. 
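+ *
+ * Sketch of partitioning one seeded Philox stream across threads (\p tid is a
+ * placeholder for a thread's global index, not defined by this header):
+ *
+ * \code
+ * curandStatePhilox4_32_10_t s;
+ * curand_init(1234ULL, 0, 0, &s);
+ * skipahead_sequence(tid, &s);   // jump to this thread's block of the stream
+ * uint4 r4 = curand4(&s);        // four 32-bit draws at once
+ * \endcode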
+ * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStatePhilox4_32_10_t *state) +{ + Philox_State_Incr_hi(state, n); + state->output = curand_Philox4x32_10(state->ctr,state->key); +} + +/** + * \brief Initialize Philox4_32_10 state. + * + * Initialize Philox4_32_10 state in \p state with the given \p seed, p\ subsequence, + * and \p offset. + * + * All input values for \p seed, \p subseqence and \p offset are legal. Each of the + * \xmlonly264\endxmlonly possible + * values of seed selects an independent sequence of length + * \xmlonly2130\endxmlonly. + * The first + * \xmlonly266 * subsequence + offset\endxmlonly. + * values of the sequence are skipped. + * I.e., subsequences are of length + * \xmlonly266\endxmlonly. + * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into subsequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStatePhilox4_32_10_t *state) +{ + state->ctr = make_uint4(0, 0, 0, 0); + state->key.x = (unsigned int)seed; + state->key.y = (unsigned int)(seed>>32); + state->STATE = 0; + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; + skipahead_sequence(subsequence, state); + skipahead(offset, state); +} + + +/* MRG32k3a RNG */ + +/* Base generator for MRG32k3a */ +QUALIFIERS unsigned long long __curand_umad(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b, GCC_UNUSED_PARAMETER unsigned long long c) +{ + unsigned long long r = 0; +NV_IF_TARGET(NV_PROVIDES_SM_61, + asm("mad.wide.u32 %0, %1, %2, %3;" + : "=l"(r) : "r"(a), "r"(b), "l"(c)); +) + return r; +} +QUALIFIERS unsigned long long __curand_umul(GCC_UNUSED_PARAMETER unsigned int a, GCC_UNUSED_PARAMETER unsigned int b) +{ + unsigned long long r = 0; +NV_IF_TARGET(NV_PROVIDES_SM_61, + asm("mul.wide.u32 %0, %1, %2;" + : "=l"(r) : "r"(a), "r"(b)); +) + return r; +} +QUALIFIERS double curand_MRG32k3a (curandStateMRG32k3a_t *state) +{ +NV_IF_TARGET(NV_PROVIDES_SM_61, + const unsigned int m1 = 4294967087u; + const unsigned int m2 = 4294944443u; + const unsigned int m1c = 209u; + const unsigned int m2c = 22853u; + const unsigned int a12 = 1403580u; + const unsigned int a13n = 810728u; + const unsigned int a21 = 527612u; + const unsigned int a23n = 1370589u; + + unsigned long long p1; + unsigned long long p2; + const unsigned long long p3 = __curand_umul(a13n, m1 - state->s1[0]); + p1 = __curand_umad(a12, state->s1[1], p3); + + // Putting addition inside and changing umul to umad + // slowed this function down on GV100 + p1 = __curand_umul(p1 >> 32, m1c) + (p1 & 0xffffffff); + if (p1 >= m1) p1 -= m1; + + state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = p1; + const unsigned long long p4 = __curand_umul(a23n, m2 - state->s2[0]); + p2 = __curand_umad(a21, state->s2[2], p4); + + // Putting addition inside and changing umul to umad + // slowed this function down on GV100 + p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff); + p2 = __curand_umul(p2 >> 32, m2c) + (p2 & 0xffffffff); + if (p2 >= m2) p2 -= m2; + + state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = p2; + + const unsigned int p5 = (unsigned int)p1 - (unsigned int)p2; + if(p1 <= p2) return p5 
+ m1; + return p5; +) +NV_IF_TARGET(NV_IS_DEVICE, +/* nj's implementation */ + const double m1 = 4294967087.; + const double m2 = 4294944443.; + const double a12 = 1403580.; + const double a13n = 810728.; + const double a21 = 527612.; + const double a23n = 1370589.; + + const double rh1 = 2.3283065498378290e-010; /* (1.0 / m1)__hi */ + const double rl1 = -1.7354913086174288e-026; /* (1.0 / m1)__lo */ + const double rh2 = 2.3283188252407387e-010; /* (1.0 / m2)__hi */ + const double rl2 = 2.4081018096503646e-026; /* (1.0 / m2)__lo */ + + double q; + double p1; + double p2; + p1 = a12 * state->s1[1] - a13n * state->s1[0]; + q = trunc (fma (p1, rh1, p1 * rl1)); + p1 -= q * m1; + if (p1 < 0.0) p1 += m1; + state->s1[0] = state->s1[1]; state->s1[1] = state->s1[2]; state->s1[2] = (unsigned int)p1; + p2 = a21 * state->s2[2] - a23n * state->s2[0]; + q = trunc (fma (p2, rh2, p2 * rl2)); + p2 -= q * m2; + if (p2 < 0.0) p2 += m2; + state->s2[0] = state->s2[1]; state->s2[1] = state->s2[2]; state->s2[2] = (unsigned int)p2; + if (p1 <= p2) return (p1 - p2 + m1); + else return (p1 - p2); +) +/* end nj's implementation */ + double p1; + double p2; + double r; + p1 = (MRG32K3A_A12 * state->s1[1]) - (MRG32K3A_A13N * state->s1[0]); + p1 = curand_MRGmod(p1, MRG32K3A_MOD1); + if (p1 < 0.0) p1 += MRG32K3A_MOD1; + state->s1[0] = state->s1[1]; + state->s1[1] = state->s1[2]; + state->s1[2] = (unsigned int)p1; + p2 = (MRG32K3A_A21 * state->s2[2]) - (MRG32K3A_A23N * state->s2[0]); + p2 = curand_MRGmod(p2, MRG32K3A_MOD2); + if (p2 < 0) p2 += MRG32K3A_MOD2; + state->s2[0] = state->s2[1]; + state->s2[1] = state->s2[2]; + state->s2[2] = (unsigned int)p2; + r = p1 - p2; + if (r <= 0) r += MRG32K3A_MOD1; + return r; +} + + +/** + * \brief Return 32-bits of pseudorandomness from an MRG32k3a generator. + * + * Return 32-bits of pseudorandomness from the MRG32k3a generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of pseudorandomness as an unsigned int, all bits valid to use. + */ +QUALIFIERS unsigned int curand(curandStateMRG32k3a_t *state) +{ + double dRet; + dRet = (double)curand_MRG32k3a(state)*(double)MRG32K3A_BITS_NORM; + return (unsigned int)dRet; +} + + + +/** + * \brief Update MRG32k3a state to skip \p n elements. + * + * Update the MRG32k3a state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3(mrg32k3aM2, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1Host, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3(mrg32k3aM2Host, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +} + +/** + * \brief Update MRG32k3a state to skip ahead \p n subsequences. + * + * Update the MRG32k3a state in \p state to skip ahead \p n subsequences. Each + * subsequence is \xmlonly2127\endxmlonly + * + * \xmlonly276\endxmlonly elements long, so this means the function will skip ahead + * \xmlonly267\endxmlonly * n elements. 
+ * + * Valid values of \p n are 0 to \xmlonly251\endxmlonly. Note \p n will be masked to 51 bits + * + * \param n - Number of subsequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_subsequence(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1SubSeq, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SubSeq, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1SubSeqHost, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SubSeqHost, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +} + +/** + * \brief Update MRG32k3a state to skip ahead \p n sequences. + * + * Update the MRG32k3a state in \p state to skip ahead \p n sequences. Each + * sequence is \xmlonly2127\endxmlonly elements long, so this means the function will skip ahead + * \xmlonly2127\endxmlonly * n elements. + * + * All values of \p n are valid. Large values require more computation and so + * will take more time to complete. + * + * \param n - Number of sequences to skip + * \param state - Pointer to state to update + */ +QUALIFIERS void skipahead_sequence(unsigned long long n, curandStateMRG32k3a_t *state) +{ + unsigned int t[3][3]; +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + curand_MRGmatPow3x3( mrg32k3aM1Seq, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2Seq, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +, + curand_MRGmatPow3x3( mrg32k3aM1SeqHost, t, MRG32K3A_MOD1, n); + curand_MRGmatVecMul3x3( t, state->s1, MRG32K3A_MOD1); + curand_MRGmatPow3x3( mrg32k3aM2SeqHost, t, MRG32K3A_MOD2, n); + curand_MRGmatVecMul3x3( t, state->s2, MRG32K3A_MOD2); +) +} + + +/** + * \brief Initialize MRG32k3a state. + * + * Initialize MRG32k3a state in \p state with the given \p seed, \p subsequence, + * and \p offset. + * + * All input values of \p seed, \p subsequence, and \p offset are legal. + * \p subsequence will be truncated to 51 bits to avoid running into the next sequence + * + * A value of 0 for \p seed sets the state to the values of the original + * published version of the \p MRG32k3a algorithm. 
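+ *
+ * A typical per-thread setup, sketched with a placeholder \p tid index:
+ *
+ * \code
+ * curandStateMRG32k3a_t state;
+ * curand_init(1234ULL, tid, 0, &state);   // tid selects the subsequence
+ * unsigned int r = curand(&state);        // 32-bit draw
+ * \endcode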
+ * + * \param seed - Arbitrary bits to use as a seed + * \param subsequence - Subsequence to start at + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset, + curandStateMRG32k3a_t *state) +{ + int i; + for ( i=0; i<3; i++ ) { + state->s1[i] = 12345u; + state->s2[i] = 12345u; + } + if (seed != 0ull) { + unsigned int x1 = ((unsigned int)seed) ^ 0x55555555UL; + unsigned int x2 = (unsigned int)((seed >> 32) ^ 0xAAAAAAAAUL); + state->s1[0] = (unsigned int)curand_MRGmodMul(x1, state->s1[0], MRG32K3A_MOD1); + state->s1[1] = (unsigned int)curand_MRGmodMul(x2, state->s1[1], MRG32K3A_MOD1); + state->s1[2] = (unsigned int)curand_MRGmodMul(x1, state->s1[2], MRG32K3A_MOD1); + state->s2[0] = (unsigned int)curand_MRGmodMul(x2, state->s2[0], MRG32K3A_MOD2); + state->s2[1] = (unsigned int)curand_MRGmodMul(x1, state->s2[1], MRG32K3A_MOD2); + state->s2[2] = (unsigned int)curand_MRGmodMul(x2, state->s2[2], MRG32K3A_MOD2); + } + skipahead_subsequence( subsequence, state ); + skipahead( offset, state ); + state->boxmuller_flag = 0; + state->boxmuller_flag_double = 0; + state->boxmuller_extra = 0.f; + state->boxmuller_extra_double = 0.; +} + +/** + * \brief Update Sobol32 state to skip \p n elements. + * + * Update the Sobol32 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +template +QUALIFIERS +typename CURAND_STD::enable_if::value || CURAND_STD::is_same::value>::type +skipahead(unsigned int n, T state) +{ + unsigned int i_gray; + state->x = state->c; + state->i += n; + /* Convert state->i to gray code */ + i_gray = state->i ^ (state->i >> 1); + for(unsigned int k = 0; k < 32; k++) { + if(i_gray & (1 << k)) { + state->x ^= state->direction_vectors[k]; + } + } + return; +} + +/** + * \brief Update Sobol64 state to skip \p n elements. + * + * Update the Sobol64 state in \p state to skip ahead \p n elements. + * + * All values of \p n are valid. + * + * \param n - Number of elements to skip + * \param state - Pointer to state to update + */ +template +QUALIFIERS +typename CURAND_STD::enable_if::value || CURAND_STD::is_same::value>::type +skipahead(unsigned long long n, T state) +{ + unsigned long long i_gray; + state->x = state->c; + state->i += n; + /* Convert state->i to gray code */ + i_gray = state->i ^ (state->i >> 1); + for(unsigned k = 0; k < 64; k++) { + if(i_gray & (1ULL << k)) { + state->x ^= state->direction_vectors[k]; + } + } + return; +} + +/** + * \brief Initialize Sobol32 state. + * + * Initialize Sobol32 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 32 unsigned ints. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 32 unsigned ints representing the + * direction vectors for the desired dimension + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors, + unsigned int offset, + curandStateSobol32_t *state) +{ + state->i = 0; + state->c = 0; + for(int i = 0; i < 32; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = 0; + skipahead(offset, state); +} +/** + * \brief Initialize Scrambled Sobol32 state. 
+ * + * Initialize Sobol32 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 32 unsigned ints. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 32 unsigned ints representing the + direction vectors for the desired dimension + * \param scramble_c Scramble constant + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors32_t direction_vectors, + unsigned int scramble_c, + unsigned int offset, + curandStateScrambledSobol32_t *state) +{ + state->i = 0; + state->c = scramble_c; + for(int i = 0; i < 32; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = state->c; + skipahead(offset, state); +} + +QUALIFIERS int __curand_find_trailing_zero(unsigned int x) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + int y = __ffs(~x); + if(y) + return y - 1; + return 31; +, + int i = 1; + while(x & 1) { + i++; + x >>= 1; + } + i = i - 1; + return i == 32 ? 31 : i; +) +} + +QUALIFIERS int __curand_find_trailing_zero(unsigned long long x) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + int y = __ffsll(~x); + if(y) + return y - 1; + return 63; +, + int i = 1; + while(x & 1) { + i++; + x >>= 1; + } + i = i - 1; + return i == 64 ? 63 : i; +) +} + +/** + * \brief Initialize Sobol64 state. + * + * Initialize Sobol64 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 64 unsigned long longs. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the + direction vectors for the desired dimension + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors, + unsigned long long offset, + curandStateSobol64_t *state) +{ + state->i = 0; + state->c = 0; + for(int i = 0; i < 64; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = 0; + skipahead(offset, state); +} + +/** + * \brief Initialize Scrambled Sobol64 state. + * + * Initialize Sobol64 state in \p state with the given \p direction \p vectors and + * \p offset. + * + * The direction vector is a device pointer to an array of 64 unsigned long longs. + * All input values of \p offset are legal. + * + * \param direction_vectors - Pointer to array of 64 unsigned long longs representing the + direction vectors for the desired dimension + * \param scramble_c Scramble constant + * \param offset - Absolute offset into sequence + * \param state - Pointer to state to initialize + */ +QUALIFIERS void curand_init(curandDirectionVectors64_t direction_vectors, + unsigned long long scramble_c, + unsigned long long offset, + curandStateScrambledSobol64_t *state) +{ + state->i = 0; + state->c = scramble_c; + for(int i = 0; i < 64; i++) { + state->direction_vectors[i] = direction_vectors[i]; + } + state->x = state->c; + skipahead(offset, state); +} + +/** + * \brief Return 32-bits of quasirandomness from a Sobol32 generator. + * + * Return 32-bits of quasirandomness from the Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use. 
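+ *
+ * Sketch of drawing successive points of one dimension; \p dirvec is assumed
+ * to already hold the 32 direction vectors for that dimension, and the names
+ * are illustrative:
+ *
+ * \code
+ * curandStateSobol32_t s;
+ * curand_init(dirvec, offset, &s);   // offset = index of the first point
+ * unsigned int x0 = curand(&s);      // point at position offset
+ * unsigned int x1 = curand(&s);      // point at position offset + 1
+ * \endcode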
+ */ + +QUALIFIERS unsigned int curand(curandStateSobol32_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned int res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 32-bits of quasirandomness from a scrambled Sobol32 generator. + * + * Return 32-bits of quasirandomness from the scrambled Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 32-bits of quasirandomness as an unsigned int, all bits valid to use. + */ + +QUALIFIERS unsigned int curand(curandStateScrambledSobol32_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned int res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 64-bits of quasirandomness from a Sobol64 generator. + * + * Return 64-bits of quasirandomness from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use. + */ + +QUALIFIERS unsigned long long curand(curandStateSobol64_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned long long res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +/** + * \brief Return 64-bits of quasirandomness from a scrambled Sobol64 generator. + * + * Return 64-bits of quasirandomness from the scrambled Sobol32 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * + * \return 64-bits of quasirandomness as an unsigned long long, all bits valid to use. 
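+ *
+ * Sketch (names illustrative): the scrambled variant is initialized with an
+ * additional scramble constant for its dimension before drawing values:
+ *
+ * \code
+ * curandStateScrambledSobol64_t s;
+ * curand_init(dirvec64, scramble_c, offset, &s);
+ * unsigned long long x = curand(&s);   // 64-bit quasirandom draw
+ * \endcode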
+ */ + +QUALIFIERS unsigned long long curand(curandStateScrambledSobol64_t * state) +{ + /* Moving from i to i+1 element in gray code is flipping one bit, + the trailing zero bit of i + */ + unsigned long long res = state->x; + state->x ^= state->direction_vectors[__curand_find_trailing_zero(state->i)]; + state->i ++; + return res; +} + +#include "curand_uniform.h" +#include "curand_normal.h" +#include "curand_lognormal.h" +#include "curand_poisson.h" +#include "curand_discrete2.h" + +__device__ static inline unsigned int *__get_precalculated_matrix(int n) +{ + if(n == 0) { + return precalc_xorwow_matrix[n]; + } + if(n == 2) { + return precalc_xorwow_offset_matrix[n]; + } + return precalc_xorwow_matrix[n]; +} + +#ifndef __CUDACC_RTC__ +__host__ static inline unsigned int *__get_precalculated_matrix_host(int n) +{ + if(n == 1) { + return precalc_xorwow_matrix_host[n]; + } + if(n == 3) { + return precalc_xorwow_offset_matrix_host[n]; + } + return precalc_xorwow_matrix_host[n]; +} +#endif // #ifndef __CUDACC_RTC__ + +__device__ static inline unsigned int *__get_mrg32k3a_matrix(int n) +{ + if(n == 0) { + return mrg32k3aM1[n][0]; + } + if(n == 2) { + return mrg32k3aM2[n][0]; + } + if(n == 4) { + return mrg32k3aM1SubSeq[n][0]; + } + if(n == 6) { + return mrg32k3aM2SubSeq[n][0]; + } + if(n == 8) { + return mrg32k3aM1Seq[n][0]; + } + if(n == 10) { + return mrg32k3aM2Seq[n][0]; + } + return mrg32k3aM1[n][0]; +} + +#ifndef __CUDACC_RTC__ +__host__ static inline unsigned int *__get_mrg32k3a_matrix_host(int n) +{ + if(n == 1) { + return mrg32k3aM1Host[n][0]; + } + if(n == 3) { + return mrg32k3aM2Host[n][0]; + } + if(n == 5) { + return mrg32k3aM1SubSeqHost[n][0]; + } + if(n == 7) { + return mrg32k3aM2SubSeqHost[n][0]; + } + if(n == 9) { + return mrg32k3aM1SeqHost[n][0]; + } + if(n == 11) { + return mrg32k3aM2SeqHost[n][0]; + } + return mrg32k3aM1Host[n][0]; +} + +__host__ static inline double *__get__cr_lgamma_table_host(void) { + return __cr_lgamma_table; +} +#endif // #ifndef __CUDACC_RTC__ + +/** @} */ + +#endif // !defined(CURAND_KERNEL_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h new file mode 100644 index 0000000000000000000000000000000000000000..a052a9c3b3f45dcf1d7d0369d0d62d8c4719e3ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mrg32k3a.h @@ -0,0 +1,3721 @@ +/* + * Copyright 2010-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ +#if !defined(CURAND_MRG32K3A_MATRICES_H_) +#define CURAND_MRG32K3A_MATRICES_H_ + +#if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables) +#define CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS inline __device__ +#else +#define CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS static __device__ +#endif + +#if (__cplusplus >= 201703L) && defined(__cpp_inline_variables) +#define CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS inline +#else +#define CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS static +#endif + + /* base matrices */ + + /* these are not actually used in the runtime code. They are */ + /* used in computing the skipahead matrices, and are included */ + /* for reference */ + /* + double M1[3][3] = { 0., 1., 0., + 0., 0., 1., + -810728., 1403580., 0. }; + + double M2[3][3] = { 0., 1., 0., + 0., 0., 1., + -1370589., 0., 527612. 
}; + + */ + + /* Base matrices to power 2 to the power n, n the first array index, from 0..63 */ +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM1[64][3][3] = { + { + { 0u, 1u, 0u }, + { 0u, 0u, 1u }, + { 4294156359u, 1403580u, 0u } + }, + { + { 0u, 0u, 1u }, + { 4294156359u, 1403580u, 0u }, + { 0u, 4294156359u, 1403580u } + }, + { + { 0u, 4294156359u, 1403580u }, + { 244671815u, 2941890554u, 4294156359u }, + { 149925673u, 489343630u, 2941890554u } + }, + { + { 1527363550u, 2758233149u, 1831234280u }, + { 4072640363u, 939574583u, 2758233149u }, + { 2064391165u, 3228066636u, 939574583u } + }, + { + { 736416029u, 2961816100u, 342112271u }, + { 387300998u, 1062452522u, 2961816100u }, + { 2955879160u, 340793741u, 1062452522u } + }, + { + { 1243502014u, 2218748291u, 1709215645u }, + { 2019641772u, 3847560959u, 2218748291u }, + { 3866010231u, 2305448679u, 3847560959u } + }, + { + { 3241775219u, 3453352062u, 3721871040u }, + { 4062454730u, 3015754u, 3453352062u }, + { 919711945u, 613405362u, 3015754u } + }, + { + { 1955221006u, 1414472808u, 1746037714u }, + { 3653507277u, 1644962013u, 1414472808u }, + { 3501544776u, 2336229602u, 1644962013u } + }, + { + { 1170096663u, 49135452u, 3441537107u }, + { 1857945175u, 1649398389u, 49135452u }, + { 333002869u, 3109147376u, 1649398389u } + }, + { + { 2299034194u, 2297111910u, 862649200u }, + { 1399961132u, 996706937u, 2297111910u }, + { 3439056503u, 1481993076u, 996706937u } + }, + { + { 4146310528u, 458782589u, 1007330283u }, + { 4241015765u, 3979619964u, 458782589u }, + { 553886495u, 2186897562u, 3979619964u } + }, + { + { 3630027893u, 2130448350u, 292773857u }, + { 1392525159u, 1299285967u, 2130448350u }, + { 2589171163u, 1217405758u, 1299285967u } + }, + { + { 892409263u, 1999175811u, 2979225418u }, + { 1996163538u, 2148702503u, 1999175811u }, + { 3922720782u, 103819730u, 2148702503u } + }, + { + { 1586003016u, 2114210471u, 3240775579u }, + { 2777288607u, 1400478398u, 2114210471u }, + { 3018215420u, 535326008u, 1400478398u } + }, + { + { 2188531273u, 1783231160u, 3576659343u }, + { 1908318389u, 379210133u, 1783231160u }, + { 554369329u, 250053591u, 379210133u } + }, + { + { 4022841636u, 3951951872u, 2143424240u }, + { 1046219306u, 1591992468u, 3951951872u }, + { 1510277444u, 381333958u, 1591992468u } + }, + { + { 2256493727u, 3715182130u, 642697923u }, + { 3615342722u, 3975008370u, 3715182130u }, + { 2405650329u, 754337639u, 3975008370u } + }, + { + { 1286664224u, 627406673u, 963516608u }, + { 1541344588u, 460768826u, 627406673u }, + { 1089892553u, 2717717970u, 460768826u } + }, + { + { 2956342842u, 3471097641u, 2353092905u }, + { 2996150472u, 420480221u, 3471097641u }, + { 2221681883u, 372736411u, 420480221u } + }, + { + { 420492906u, 153526651u, 3499730988u }, + { 2662640502u, 3278195133u, 153526651u }, + { 4086436419u, 2510762118u, 3278195133u } + }, + { + { 3310184147u, 2228376089u, 823220763u }, + { 3992771814u, 1693168425u, 2228376089u }, + { 2295790366u, 1401872772u, 1693168425u } + }, + { + { 2529428830u, 1497104068u, 4253248635u }, + { 3746310018u, 630867741u, 1497104068u }, + { 627043435u, 721725795u, 630867741u } + }, + { + { 2571072593u, 3039669025u, 1591031831u }, + { 526054481u, 661344445u, 3039669025u }, + { 4246010312u, 735391270u, 661344445u } + }, + { + { 1847312821u, 4042890210u, 4241772463u }, + { 606605705u, 2644799309u, 4042890210u }, + { 2658402822u, 1342278931u, 2644799309u } + }, + { + { 2409846784u, 1096138313u, 1416249993u }, + { 1501878241u, 138013862u, 1096138313u }, + { 1617749306u, 1975136163u, 
138013862u } + }, + { + { 599453422u, 73950522u, 2965395603u }, + { 55354701u, 3855242202u, 73950522u }, + { 3981734504u, 3354399019u, 3855242202u } + }, + { + { 4271076381u, 813410089u, 3461955319u }, + { 1044920137u, 3029005516u, 813410089u }, + { 3501837362u, 3321539504u, 3029005516u } + }, + { + { 3058183515u, 941408572u, 1783998098u }, + { 1546486080u, 4116985007u, 941408572u }, + { 2247500745u, 1460625377u, 4116985007u } + }, + { + { 4216782514u, 3352801941u, 2315095646u }, + { 639029973u, 94451952u, 3352801941u }, + { 1242898773u, 3964593332u, 94451952u } + }, + { + { 2264905138u, 1926285644u, 1108147171u }, + { 2390706911u, 385258225u, 1926285644u }, + { 3569882325u, 3728744670u, 385258225u } + }, + { + { 270679073u, 1065683096u, 2992662885u }, + { 4196917281u, 2886425156u, 1065683096u }, + { 749134119u, 1849148167u, 2886425156u } + }, + { + { 35689930u, 1378151623u, 951629713u }, + { 673810920u, 948843427u, 1378151623u }, + { 3808868984u, 927013635u, 948843427u } + }, + { + { 1891490872u, 1130489594u, 3734864133u }, + { 1457450350u, 3362920032u, 1130489594u }, + { 638998846u, 1401175590u, 3362920032u } + }, + { + { 2254459023u, 2384691454u, 1730098031u }, + { 2844861718u, 1807491073u, 2384691454u }, + { 351423668u, 1570264155u, 1807491073u } + }, + { + { 3047429268u, 4245359555u, 2449575498u }, + { 1797081212u, 1237196477u, 4245359555u }, + { 143400628u, 3663731096u, 1237196477u } + }, + { + { 3313321106u, 4263819658u, 1047529624u }, + { 3719941673u, 3155049403u, 4263819658u }, + { 1981313839u, 4281524426u, 3155049403u } + }, + { + { 2005252417u, 3263186729u, 1535805957u }, + { 2951515865u, 1729281525u, 3263186729u }, + { 1141249417u, 2268963059u, 1729281525u } + }, + { + { 2367065164u, 83908466u, 4294308508u }, + { 1352516724u, 1416676049u, 83908466u }, + { 1040867745u, 1304732377u, 1416676049u } + }, + { + { 3214147257u, 1434230503u, 2944821434u }, + { 2753040912u, 4041536918u, 1434230503u }, + { 1317260239u, 338830578u, 4041536918u } + }, + { + { 300628476u, 2054743463u, 1499597869u }, + { 1762244284u, 1422043015u, 2054743463u }, + { 3581125669u, 1207561803u, 1422043015u } + }, + { + { 4171745404u, 4064983592u, 1934508265u }, + { 3049723261u, 1744636487u, 4064983592u }, + { 947753516u, 3952135907u, 1744636487u } + }, + { + { 1625369148u, 3577024659u, 2778677259u }, + { 1729967818u, 1049600974u, 3577024659u }, + { 2089137344u, 1569794605u, 1049600974u } + }, + { + { 1373068765u, 3958611830u, 569117280u }, + { 410042396u, 3551255470u, 3958611830u }, + { 869476379u, 1680625376u, 3551255470u } + }, + { + { 2108618602u, 2543645250u, 913717833u }, + { 2111984988u, 1012482542u, 2543645250u }, + { 2545745615u, 3141042890u, 1012482542u } + }, + { + { 1157293598u, 584852249u, 2272893205u }, + { 1631801979u, 3013855247u, 584852249u }, + { 3977310441u, 82049263u, 3013855247u } + }, + { + { 3580234334u, 3137526662u, 2403875621u }, + { 3580869206u, 3670086228u, 3137526662u }, + { 656744553u, 1764904195u, 3670086228u } + }, + { + { 2792496861u, 3634185196u, 3887031679u }, + { 3601823850u, 3464838365u, 3634185196u }, + { 3136165138u, 2842987937u, 3464838365u } + }, + { + { 1362557480u, 3230022138u, 4278720212u }, + { 3427386258u, 3848976950u, 3230022138u }, + { 2109817045u, 2441486578u, 3848976950u } + }, + { + { 1198519135u, 2007945401u, 3868481u }, + { 3335076429u, 2082683147u, 2007945401u }, + { 2341088247u, 888193479u, 2082683147u } + }, + { + { 3473925387u, 3193380570u, 565138859u }, + { 307060547u, 782210925u, 3193380570u }, + { 167617770u, 2180014252u, 782210925u } + }, + { + { 
3811588895u, 3303532086u, 2766583698u }, + { 908630605u, 2665400165u, 3303532086u }, + { 2499994113u, 3316180851u, 2665400165u } + }, + { + { 4288926968u, 3033075037u, 1505732852u }, + { 1531633406u, 645804125u, 3033075037u }, + { 2942690261u, 2205365640u, 645804125u } + }, + { + { 3976196483u, 3651411522u, 1652430357u }, + { 1690405883u, 1294990760u, 3651411522u }, + { 209339647u, 3088484327u, 1294990760u } + }, + { + { 3171589548u, 2291131070u, 2093793287u }, + { 2997812074u, 4093879780u, 2291131070u }, + { 3255666800u, 858124816u, 4093879780u } + }, + { + { 4113016361u, 2999667479u, 3995043314u }, + { 1333973326u, 4007774239u, 2999667479u }, + { 3322921863u, 4278103786u, 4007774239u } + }, + { + { 925786347u, 2109676036u, 1879981040u }, + { 1701566570u, 1489702270u, 2109676036u }, + { 2719807628u, 158549605u, 1489702270u } + }, + { + { 2255405265u, 3460246357u, 218033453u }, + { 2135115875u, 359516994u, 3460246357u }, + { 3568862459u, 3114762683u, 359516994u } + }, + { + { 773148471u, 4117539411u, 3073622315u }, + { 3807175775u, 186466108u, 4117539411u }, + { 2842197411u, 651334129u, 186466108u } + }, + { + { 615242951u, 1475251263u, 3586439101u }, + { 1693917167u, 3058812486u, 1475251263u }, + { 568701600u, 1164226398u, 3058812486u } + }, + { + { 1632636204u, 15370275u, 2061555515u }, + { 4187505695u, 1741164221u, 15370275u }, + { 2882176274u, 3978412194u, 1741164221u } + }, + { + { 3446066703u, 344820524u, 74213775u }, + { 1008543583u, 2579620192u, 344820524u }, + { 3753911358u, 1538453821u, 2579620192u } + }, + { + { 3600859892u, 1269921024u, 4069458760u }, + { 2050939727u, 2222725697u, 1269921024u }, + { 3208347646u, 690898125u, 2222725697u } + }, + { + { 599407451u, 2806239788u, 1742216102u }, + { 975123999u, 764869161u, 2806239788u }, + { 2729710367u, 1845257036u, 764869161u } + }, + { + { 967330218u, 3464884028u, 3444447102u }, + { 580449578u, 1343714307u, 3464884028u }, + { 1775329096u, 4027221761u, 1343714307u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM1Host[64][3][3] = { + { + { 0u, 1u, 0u }, + { 0u, 0u, 1u }, + { 4294156359u, 1403580u, 0u } + }, + { + { 0u, 0u, 1u }, + { 4294156359u, 1403580u, 0u }, + { 0u, 4294156359u, 1403580u } + }, + { + { 0u, 4294156359u, 1403580u }, + { 244671815u, 2941890554u, 4294156359u }, + { 149925673u, 489343630u, 2941890554u } + }, + { + { 1527363550u, 2758233149u, 1831234280u }, + { 4072640363u, 939574583u, 2758233149u }, + { 2064391165u, 3228066636u, 939574583u } + }, + { + { 736416029u, 2961816100u, 342112271u }, + { 387300998u, 1062452522u, 2961816100u }, + { 2955879160u, 340793741u, 1062452522u } + }, + { + { 1243502014u, 2218748291u, 1709215645u }, + { 2019641772u, 3847560959u, 2218748291u }, + { 3866010231u, 2305448679u, 3847560959u } + }, + { + { 3241775219u, 3453352062u, 3721871040u }, + { 4062454730u, 3015754u, 3453352062u }, + { 919711945u, 613405362u, 3015754u } + }, + { + { 1955221006u, 1414472808u, 1746037714u }, + { 3653507277u, 1644962013u, 1414472808u }, + { 3501544776u, 2336229602u, 1644962013u } + }, + { + { 1170096663u, 49135452u, 3441537107u }, + { 1857945175u, 1649398389u, 49135452u }, + { 333002869u, 3109147376u, 1649398389u } + }, + { + { 2299034194u, 2297111910u, 862649200u }, + { 1399961132u, 996706937u, 2297111910u }, + { 3439056503u, 1481993076u, 996706937u } + }, + { + { 4146310528u, 458782589u, 1007330283u }, + { 4241015765u, 3979619964u, 458782589u }, + { 553886495u, 2186897562u, 3979619964u } + }, + { + { 3630027893u, 2130448350u, 292773857u }, + { 
1392525159u, 1299285967u, 2130448350u }, + { 2589171163u, 1217405758u, 1299285967u } + }, + { + { 892409263u, 1999175811u, 2979225418u }, + { 1996163538u, 2148702503u, 1999175811u }, + { 3922720782u, 103819730u, 2148702503u } + }, + { + { 1586003016u, 2114210471u, 3240775579u }, + { 2777288607u, 1400478398u, 2114210471u }, + { 3018215420u, 535326008u, 1400478398u } + }, + { + { 2188531273u, 1783231160u, 3576659343u }, + { 1908318389u, 379210133u, 1783231160u }, + { 554369329u, 250053591u, 379210133u } + }, + { + { 4022841636u, 3951951872u, 2143424240u }, + { 1046219306u, 1591992468u, 3951951872u }, + { 1510277444u, 381333958u, 1591992468u } + }, + { + { 2256493727u, 3715182130u, 642697923u }, + { 3615342722u, 3975008370u, 3715182130u }, + { 2405650329u, 754337639u, 3975008370u } + }, + { + { 1286664224u, 627406673u, 963516608u }, + { 1541344588u, 460768826u, 627406673u }, + { 1089892553u, 2717717970u, 460768826u } + }, + { + { 2956342842u, 3471097641u, 2353092905u }, + { 2996150472u, 420480221u, 3471097641u }, + { 2221681883u, 372736411u, 420480221u } + }, + { + { 420492906u, 153526651u, 3499730988u }, + { 2662640502u, 3278195133u, 153526651u }, + { 4086436419u, 2510762118u, 3278195133u } + }, + { + { 3310184147u, 2228376089u, 823220763u }, + { 3992771814u, 1693168425u, 2228376089u }, + { 2295790366u, 1401872772u, 1693168425u } + }, + { + { 2529428830u, 1497104068u, 4253248635u }, + { 3746310018u, 630867741u, 1497104068u }, + { 627043435u, 721725795u, 630867741u } + }, + { + { 2571072593u, 3039669025u, 1591031831u }, + { 526054481u, 661344445u, 3039669025u }, + { 4246010312u, 735391270u, 661344445u } + }, + { + { 1847312821u, 4042890210u, 4241772463u }, + { 606605705u, 2644799309u, 4042890210u }, + { 2658402822u, 1342278931u, 2644799309u } + }, + { + { 2409846784u, 1096138313u, 1416249993u }, + { 1501878241u, 138013862u, 1096138313u }, + { 1617749306u, 1975136163u, 138013862u } + }, + { + { 599453422u, 73950522u, 2965395603u }, + { 55354701u, 3855242202u, 73950522u }, + { 3981734504u, 3354399019u, 3855242202u } + }, + { + { 4271076381u, 813410089u, 3461955319u }, + { 1044920137u, 3029005516u, 813410089u }, + { 3501837362u, 3321539504u, 3029005516u } + }, + { + { 3058183515u, 941408572u, 1783998098u }, + { 1546486080u, 4116985007u, 941408572u }, + { 2247500745u, 1460625377u, 4116985007u } + }, + { + { 4216782514u, 3352801941u, 2315095646u }, + { 639029973u, 94451952u, 3352801941u }, + { 1242898773u, 3964593332u, 94451952u } + }, + { + { 2264905138u, 1926285644u, 1108147171u }, + { 2390706911u, 385258225u, 1926285644u }, + { 3569882325u, 3728744670u, 385258225u } + }, + { + { 270679073u, 1065683096u, 2992662885u }, + { 4196917281u, 2886425156u, 1065683096u }, + { 749134119u, 1849148167u, 2886425156u } + }, + { + { 35689930u, 1378151623u, 951629713u }, + { 673810920u, 948843427u, 1378151623u }, + { 3808868984u, 927013635u, 948843427u } + }, + { + { 1891490872u, 1130489594u, 3734864133u }, + { 1457450350u, 3362920032u, 1130489594u }, + { 638998846u, 1401175590u, 3362920032u } + }, + { + { 2254459023u, 2384691454u, 1730098031u }, + { 2844861718u, 1807491073u, 2384691454u }, + { 351423668u, 1570264155u, 1807491073u } + }, + { + { 3047429268u, 4245359555u, 2449575498u }, + { 1797081212u, 1237196477u, 4245359555u }, + { 143400628u, 3663731096u, 1237196477u } + }, + { + { 3313321106u, 4263819658u, 1047529624u }, + { 3719941673u, 3155049403u, 4263819658u }, + { 1981313839u, 4281524426u, 3155049403u } + }, + { + { 2005252417u, 3263186729u, 1535805957u }, + { 2951515865u, 1729281525u, 3263186729u }, + 
{ 1141249417u, 2268963059u, 1729281525u } + }, + { + { 2367065164u, 83908466u, 4294308508u }, + { 1352516724u, 1416676049u, 83908466u }, + { 1040867745u, 1304732377u, 1416676049u } + }, + { + { 3214147257u, 1434230503u, 2944821434u }, + { 2753040912u, 4041536918u, 1434230503u }, + { 1317260239u, 338830578u, 4041536918u } + }, + { + { 300628476u, 2054743463u, 1499597869u }, + { 1762244284u, 1422043015u, 2054743463u }, + { 3581125669u, 1207561803u, 1422043015u } + }, + { + { 4171745404u, 4064983592u, 1934508265u }, + { 3049723261u, 1744636487u, 4064983592u }, + { 947753516u, 3952135907u, 1744636487u } + }, + { + { 1625369148u, 3577024659u, 2778677259u }, + { 1729967818u, 1049600974u, 3577024659u }, + { 2089137344u, 1569794605u, 1049600974u } + }, + { + { 1373068765u, 3958611830u, 569117280u }, + { 410042396u, 3551255470u, 3958611830u }, + { 869476379u, 1680625376u, 3551255470u } + }, + { + { 2108618602u, 2543645250u, 913717833u }, + { 2111984988u, 1012482542u, 2543645250u }, + { 2545745615u, 3141042890u, 1012482542u } + }, + { + { 1157293598u, 584852249u, 2272893205u }, + { 1631801979u, 3013855247u, 584852249u }, + { 3977310441u, 82049263u, 3013855247u } + }, + { + { 3580234334u, 3137526662u, 2403875621u }, + { 3580869206u, 3670086228u, 3137526662u }, + { 656744553u, 1764904195u, 3670086228u } + }, + { + { 2792496861u, 3634185196u, 3887031679u }, + { 3601823850u, 3464838365u, 3634185196u }, + { 3136165138u, 2842987937u, 3464838365u } + }, + { + { 1362557480u, 3230022138u, 4278720212u }, + { 3427386258u, 3848976950u, 3230022138u }, + { 2109817045u, 2441486578u, 3848976950u } + }, + { + { 1198519135u, 2007945401u, 3868481u }, + { 3335076429u, 2082683147u, 2007945401u }, + { 2341088247u, 888193479u, 2082683147u } + }, + { + { 3473925387u, 3193380570u, 565138859u }, + { 307060547u, 782210925u, 3193380570u }, + { 167617770u, 2180014252u, 782210925u } + }, + { + { 3811588895u, 3303532086u, 2766583698u }, + { 908630605u, 2665400165u, 3303532086u }, + { 2499994113u, 3316180851u, 2665400165u } + }, + { + { 4288926968u, 3033075037u, 1505732852u }, + { 1531633406u, 645804125u, 3033075037u }, + { 2942690261u, 2205365640u, 645804125u } + }, + { + { 3976196483u, 3651411522u, 1652430357u }, + { 1690405883u, 1294990760u, 3651411522u }, + { 209339647u, 3088484327u, 1294990760u } + }, + { + { 3171589548u, 2291131070u, 2093793287u }, + { 2997812074u, 4093879780u, 2291131070u }, + { 3255666800u, 858124816u, 4093879780u } + }, + { + { 4113016361u, 2999667479u, 3995043314u }, + { 1333973326u, 4007774239u, 2999667479u }, + { 3322921863u, 4278103786u, 4007774239u } + }, + { + { 925786347u, 2109676036u, 1879981040u }, + { 1701566570u, 1489702270u, 2109676036u }, + { 2719807628u, 158549605u, 1489702270u } + }, + { + { 2255405265u, 3460246357u, 218033453u }, + { 2135115875u, 359516994u, 3460246357u }, + { 3568862459u, 3114762683u, 359516994u } + }, + { + { 773148471u, 4117539411u, 3073622315u }, + { 3807175775u, 186466108u, 4117539411u }, + { 2842197411u, 651334129u, 186466108u } + }, + { + { 615242951u, 1475251263u, 3586439101u }, + { 1693917167u, 3058812486u, 1475251263u }, + { 568701600u, 1164226398u, 3058812486u } + }, + { + { 1632636204u, 15370275u, 2061555515u }, + { 4187505695u, 1741164221u, 15370275u }, + { 2882176274u, 3978412194u, 1741164221u } + }, + { + { 3446066703u, 344820524u, 74213775u }, + { 1008543583u, 2579620192u, 344820524u }, + { 3753911358u, 1538453821u, 2579620192u } + }, + { + { 3600859892u, 1269921024u, 4069458760u }, + { 2050939727u, 2222725697u, 1269921024u }, + { 3208347646u, 690898125u, 
2222725697u } + }, + { + { 599407451u, 2806239788u, 1742216102u }, + { 975123999u, 764869161u, 2806239788u }, + { 2729710367u, 1845257036u, 764869161u } + }, + { + { 967330218u, 3464884028u, 3444447102u }, + { 580449578u, 1343714307u, 3464884028u }, + { 1775329096u, 4027221761u, 1343714307u } + } + }; +#endif // ifndef __CUDACC_RTC__ + +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM2[64][3][3] = { + { + { 0u, 1u, 0u }, + { 0u, 0u, 1u }, + { 4293573854u, 0u, 527612u } + }, + { + { 0u, 0u, 1u }, + { 4293573854u, 0u, 527612u }, + { 2706407399u, 4293573854u, 3497978192u } + }, + { + { 2706407399u, 4293573854u, 3497978192u }, + { 1431525864u, 2706407399u, 3281754271u }, + { 97673890u, 1431525864u, 1673476130u } + }, + { + { 3405842137u, 2680076935u, 893509979u }, + { 4035147174u, 3405842137u, 3280220074u }, + { 2623373296u, 4035147174u, 361718588u } + }, + { + { 818368950u, 3790774567u, 3542344109u }, + { 1817134745u, 818368950u, 3321940838u }, + { 3493477402u, 1817134745u, 2854655037u } + }, + { + { 498682467u, 2928649385u, 811441367u }, + { 1777037472u, 498682467u, 479207863u }, + { 3058260025u, 1777037472u, 1528225099u } + }, + { + { 3893311647u, 3140922085u, 64039185u }, + { 82107183u, 3893311647u, 2655465224u }, + { 1674879036u, 82107183u, 1089381262u } + }, + { + { 28639152u, 3496041927u, 2231910770u }, + { 3174683233u, 28639152u, 2828785870u }, + { 3681140872u, 3174683233u, 3910194649u } + }, + { + { 1463826069u, 300842059u, 3313769518u }, + { 1799677538u, 1463826069u, 3174861078u }, + { 1882279394u, 1799677538u, 3509975160u } + }, + { + { 2092194020u, 184076987u, 2202401252u }, + { 3103629604u, 2092194020u, 3409560232u }, + { 4257445059u, 3103629604u, 2390202783u } + }, + { + { 812917091u, 2574011276u, 4168802395u }, + { 209817750u, 812917091u, 2974870628u }, + { 3238802184u, 209817750u, 3692836406u } + }, + { + { 477309738u, 3314523413u, 3442242150u }, + { 2755731404u, 477309738u, 2782713347u }, + { 1606221490u, 2755731404u, 1033463096u } + }, + { + { 2155469603u, 3326516116u, 3843369786u }, + { 288604458u, 2155469603u, 571673571u }, + { 1501677614u, 288604458u, 2928213494u } + }, + { + { 2082469029u, 749754403u, 3963963316u }, + { 2764859700u, 2082469029u, 3576428059u }, + { 2840894706u, 2764859700u, 1782279859u } + }, + { + { 3760163766u, 1041986082u, 1799196192u }, + { 1022129134u, 3760163766u, 1332558840u }, + { 276873446u, 1022129134u, 3979423632u } + }, + { + { 1021313167u, 1312544548u, 1716381787u }, + { 3037868518u, 1021313167u, 199085085u }, + { 2582787611u, 3037868518u, 3539882179u } + }, + { + { 2569413030u, 1631336015u, 2594942403u }, + { 1030618503u, 2569413030u, 3467650326u }, + { 1998739584u, 1030618503u, 3174552073u } + }, + { + { 2334639309u, 3114094203u, 601680947u }, + { 2110199318u, 2334639309u, 678342865u }, + { 1649523168u, 2110199318u, 2154948056u } + }, + { + { 563657176u, 191330473u, 1641595774u }, + { 780563537u, 563657176u, 3029522338u }, + { 2037330914u, 780563537u, 2084602709u } + }, + { + { 3414769923u, 1968799026u, 2238126504u }, + { 832866376u, 3414769923u, 3754780168u }, + { 2165145850u, 832866376u, 1594768331u } + }, + { + { 1646861218u, 2317984620u, 2301581548u }, + { 2672536210u, 1646861218u, 359763062u }, + { 2391283983u, 2672536210u, 1885870777u } + }, + { + { 841254072u, 3765813448u, 1635365181u }, + { 2013240130u, 841254072u, 605925849u }, + { 3743932305u, 2013240130u, 400681955u } + }, + { + { 1930213004u, 2072952279u, 3077694794u }, + { 3579956569u, 1930213004u, 2478539210u }, + { 1960229502u, 3579956569u, 1455652656u } + }, 
+ { + { 1097613522u, 1784540933u, 1194440107u }, + { 321747515u, 1097613522u, 1225209584u }, + { 74521379u, 321747515u, 4288531000u } + }, + { + { 143812745u, 3254530816u, 3514348856u }, + { 769295000u, 143812745u, 2468210728u }, + { 1927161272u, 769295000u, 522705580u } + }, + { + { 2692035063u, 2596905012u, 1643240704u }, + { 1103432342u, 2692035063u, 1446182108u }, + { 4161111774u, 1103432342u, 3076435551u } + }, + { + { 2375319030u, 1391532370u, 3742334018u }, + { 1202100604u, 2375319030u, 4098434768u }, + { 2327872488u, 1202100604u, 1471526950u } + }, + { + { 4269164791u, 2795313144u, 2507855960u }, + { 4245372460u, 4269164791u, 4094914553u }, + { 3873219634u, 4245372460u, 1473695507u } + }, + { + { 513890845u, 1208902926u, 2870530442u }, + { 1984873167u, 513890845u, 1257532340u }, + { 1212627640u, 1984873167u, 2354363842u } + }, + { + { 1848364568u, 1552116673u, 3496528455u }, + { 4160778291u, 1848364568u, 141769900u }, + { 3611019106u, 4160778291u, 596424080u } + }, + { + { 364070020u, 3520039729u, 837362349u }, + { 2544671570u, 364070020u, 2188646679u }, + { 163978331u, 2544671570u, 672947816u } + }, + { + { 1192700714u, 3968150021u, 298357363u }, + { 635565666u, 1192700714u, 2589432341u }, + { 2548654227u, 635565666u, 3531570992u } + }, + { + { 2709640529u, 676525399u, 875361870u }, + { 1315499519u, 2709640529u, 3842690720u }, + { 3300994644u, 1315499519u, 2446760804u } + }, + { + { 2742149264u, 1410604392u, 3032350755u }, + { 3774935330u, 2742149264u, 597633965u }, + { 4085935803u, 3774935330u, 3952463556u } + }, + { + { 3878579563u, 845297523u, 1721916511u }, + { 2077922420u, 3878579563u, 3651360351u }, + { 2177255734u, 2077922420u, 3791239282u } + }, + { + { 1570315355u, 4252790045u, 3522351060u }, + { 2324624266u, 1570315355u, 3594939336u }, + { 1725087354u, 2324624266u, 1338343327u } + }, + { + { 2305761589u, 381933244u, 3663579047u }, + { 1355307047u, 2305761589u, 313617972u }, + { 992174375u, 1355307047u, 3881593435u } + }, + { + { 1667857811u, 1564715297u, 2263851601u }, + { 3791771273u, 1667857811u, 4196134923u }, + { 3347975047u, 3791771273u, 615040705u } + }, + { + { 4093947334u, 3454015638u, 2815567716u }, + { 4261953004u, 4093947334u, 3973733876u }, + { 2979573134u, 4261953004u, 3757047667u } + }, + { + { 250120061u, 570149551u, 1513430926u }, + { 3178644752u, 250120061u, 1701869032u }, + { 4172515680u, 3178644752u, 4213855850u } + }, + { + { 4158106802u, 3062358456u, 1815738463u }, + { 1379176112u, 4158106802u, 3926509890u }, + { 2842564878u, 1379176112u, 2852219546u } + }, + { + { 931848746u, 256263523u, 2633569246u }, + { 3284646837u, 931848746u, 2567084715u }, + { 415258465u, 3284646837u, 2017565947u } + }, + { + { 1648005210u, 1032291296u, 3987397422u }, + { 1831496020u, 1648005210u, 2829448427u }, + { 1821082272u, 1831496020u, 2917140265u } + }, + { + { 4161327077u, 489964129u, 3870847744u }, + { 1669447863u, 4161327077u, 4292947198u }, + { 1522417114u, 1669447863u, 2652286672u } + }, + { + { 1270934555u, 3136631324u, 505612043u }, + { 2981474723u, 1270934555u, 2528619024u }, + { 625182639u, 2981474723u, 1008985039u } + }, + { + { 280996820u, 143706137u, 3013099060u }, + { 1797675893u, 280996820u, 3743985508u }, + { 1123794455u, 1797675893u, 2460119169u } + }, + { + { 919218027u, 4154920441u, 1125672685u }, + { 3933041881u, 919218027u, 474242849u }, + { 564891116u, 3933041881u, 2263904321u } + }, + { + { 2920112852u, 1965329198u, 1177141043u }, + { 2135250851u, 2920112852u, 969184056u }, + { 296035385u, 2135250851u, 4267827987u } + }, + { + { 1481142942u, 
4120754772u, 1088557292u }, + { 265491023u, 1481142942u, 2860005744u }, + { 301796252u, 265491023u, 1935975979u } + }, + { + { 2111859033u, 2813610100u, 1001476468u }, + { 73849832u, 2111859033u, 3980799998u }, + { 3330206241u, 73849832u, 1933943506u } + }, + { + { 1781286360u, 3661231931u, 3509383709u }, + { 2753158871u, 1781286360u, 3119883109u }, + { 3576525143u, 2753158871u, 551079002u } + }, + { + { 1185024844u, 587779104u, 1004942725u }, + { 3763632860u, 1185024844u, 947424568u }, + { 3811666068u, 3763632860u, 2352253462u } + }, + { + { 1310227170u, 218138208u, 3172947233u }, + { 766129426u, 1310227170u, 1808643264u }, + { 2226659371u, 766129426u, 3853798112u } + }, + { + { 2230902378u, 4243560874u, 2491962392u }, + { 3836629116u, 2230902378u, 3637515403u }, + { 2846140932u, 3836629116u, 3083355464u } + }, + { + { 999448569u, 1464488480u, 3344426626u }, + { 946166795u, 999448569u, 340856814u }, + { 3686999436u, 946166795u, 3231079441u } + }, + { + { 1226155368u, 3477563770u, 550006884u }, + { 2378667355u, 1226155368u, 1493409040u }, + { 260364836u, 2378667355u, 4133888397u } + }, + { + { 1277901832u, 310796286u, 2818511068u }, + { 3088910653u, 1277901832u, 3303406025u }, + { 2507911914u, 3088910653u, 3712928074u } + }, + { + { 481918378u, 339570348u, 1728801469u }, + { 1623163429u, 481918378u, 2209094694u }, + { 3146982514u, 1623163429u, 508445538u } + }, + { + { 3138921230u, 2381863183u, 1992357430u }, + { 1024510915u, 3138921230u, 2122851650u }, + { 1453455184u, 1024510915u, 941946604u } + }, + { + { 2465372719u, 1391015357u, 3328905025u }, + { 1821933605u, 2465372719u, 1343489680u }, + { 3648970313u, 1821933605u, 1816599716u } + }, + { + { 118634664u, 3358712512u, 2492792220u }, + { 348833376u, 118634664u, 2495544591u }, + { 3235582254u, 348833376u, 4043157504u } + }, + { + { 2303067090u, 3371139074u, 1967771133u }, + { 598630070u, 2303067090u, 1819012637u }, + { 2049250561u, 598630070u, 4093044926u } + }, + { + { 3035321857u, 3971176093u, 226779704u }, + { 3361614254u, 3035321857u, 2807125404u }, + { 326640887u, 3361614254u, 3147308542u } + }, + { + { 1774298149u, 4179629947u, 3145006948u }, + { 1688753503u, 1774298149u, 94869516u }, + { 2327946901u, 1688753503u, 2786835219u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM2Host[64][3][3] = { + { + { 0u, 1u, 0u }, + { 0u, 0u, 1u }, + { 4293573854u, 0u, 527612u } + }, + { + { 0u, 0u, 1u }, + { 4293573854u, 0u, 527612u }, + { 2706407399u, 4293573854u, 3497978192u } + }, + { + { 2706407399u, 4293573854u, 3497978192u }, + { 1431525864u, 2706407399u, 3281754271u }, + { 97673890u, 1431525864u, 1673476130u } + }, + { + { 3405842137u, 2680076935u, 893509979u }, + { 4035147174u, 3405842137u, 3280220074u }, + { 2623373296u, 4035147174u, 361718588u } + }, + { + { 818368950u, 3790774567u, 3542344109u }, + { 1817134745u, 818368950u, 3321940838u }, + { 3493477402u, 1817134745u, 2854655037u } + }, + { + { 498682467u, 2928649385u, 811441367u }, + { 1777037472u, 498682467u, 479207863u }, + { 3058260025u, 1777037472u, 1528225099u } + }, + { + { 3893311647u, 3140922085u, 64039185u }, + { 82107183u, 3893311647u, 2655465224u }, + { 1674879036u, 82107183u, 1089381262u } + }, + { + { 28639152u, 3496041927u, 2231910770u }, + { 3174683233u, 28639152u, 2828785870u }, + { 3681140872u, 3174683233u, 3910194649u } + }, + { + { 1463826069u, 300842059u, 3313769518u }, + { 1799677538u, 1463826069u, 3174861078u }, + { 1882279394u, 1799677538u, 3509975160u } + }, + { + { 2092194020u, 184076987u, 2202401252u }, + 
{ 3103629604u, 2092194020u, 3409560232u }, + { 4257445059u, 3103629604u, 2390202783u } + }, + { + { 812917091u, 2574011276u, 4168802395u }, + { 209817750u, 812917091u, 2974870628u }, + { 3238802184u, 209817750u, 3692836406u } + }, + { + { 477309738u, 3314523413u, 3442242150u }, + { 2755731404u, 477309738u, 2782713347u }, + { 1606221490u, 2755731404u, 1033463096u } + }, + { + { 2155469603u, 3326516116u, 3843369786u }, + { 288604458u, 2155469603u, 571673571u }, + { 1501677614u, 288604458u, 2928213494u } + }, + { + { 2082469029u, 749754403u, 3963963316u }, + { 2764859700u, 2082469029u, 3576428059u }, + { 2840894706u, 2764859700u, 1782279859u } + }, + { + { 3760163766u, 1041986082u, 1799196192u }, + { 1022129134u, 3760163766u, 1332558840u }, + { 276873446u, 1022129134u, 3979423632u } + }, + { + { 1021313167u, 1312544548u, 1716381787u }, + { 3037868518u, 1021313167u, 199085085u }, + { 2582787611u, 3037868518u, 3539882179u } + }, + { + { 2569413030u, 1631336015u, 2594942403u }, + { 1030618503u, 2569413030u, 3467650326u }, + { 1998739584u, 1030618503u, 3174552073u } + }, + { + { 2334639309u, 3114094203u, 601680947u }, + { 2110199318u, 2334639309u, 678342865u }, + { 1649523168u, 2110199318u, 2154948056u } + }, + { + { 563657176u, 191330473u, 1641595774u }, + { 780563537u, 563657176u, 3029522338u }, + { 2037330914u, 780563537u, 2084602709u } + }, + { + { 3414769923u, 1968799026u, 2238126504u }, + { 832866376u, 3414769923u, 3754780168u }, + { 2165145850u, 832866376u, 1594768331u } + }, + { + { 1646861218u, 2317984620u, 2301581548u }, + { 2672536210u, 1646861218u, 359763062u }, + { 2391283983u, 2672536210u, 1885870777u } + }, + { + { 841254072u, 3765813448u, 1635365181u }, + { 2013240130u, 841254072u, 605925849u }, + { 3743932305u, 2013240130u, 400681955u } + }, + { + { 1930213004u, 2072952279u, 3077694794u }, + { 3579956569u, 1930213004u, 2478539210u }, + { 1960229502u, 3579956569u, 1455652656u } + }, + { + { 1097613522u, 1784540933u, 1194440107u }, + { 321747515u, 1097613522u, 1225209584u }, + { 74521379u, 321747515u, 4288531000u } + }, + { + { 143812745u, 3254530816u, 3514348856u }, + { 769295000u, 143812745u, 2468210728u }, + { 1927161272u, 769295000u, 522705580u } + }, + { + { 2692035063u, 2596905012u, 1643240704u }, + { 1103432342u, 2692035063u, 1446182108u }, + { 4161111774u, 1103432342u, 3076435551u } + }, + { + { 2375319030u, 1391532370u, 3742334018u }, + { 1202100604u, 2375319030u, 4098434768u }, + { 2327872488u, 1202100604u, 1471526950u } + }, + { + { 4269164791u, 2795313144u, 2507855960u }, + { 4245372460u, 4269164791u, 4094914553u }, + { 3873219634u, 4245372460u, 1473695507u } + }, + { + { 513890845u, 1208902926u, 2870530442u }, + { 1984873167u, 513890845u, 1257532340u }, + { 1212627640u, 1984873167u, 2354363842u } + }, + { + { 1848364568u, 1552116673u, 3496528455u }, + { 4160778291u, 1848364568u, 141769900u }, + { 3611019106u, 4160778291u, 596424080u } + }, + { + { 364070020u, 3520039729u, 837362349u }, + { 2544671570u, 364070020u, 2188646679u }, + { 163978331u, 2544671570u, 672947816u } + }, + { + { 1192700714u, 3968150021u, 298357363u }, + { 635565666u, 1192700714u, 2589432341u }, + { 2548654227u, 635565666u, 3531570992u } + }, + { + { 2709640529u, 676525399u, 875361870u }, + { 1315499519u, 2709640529u, 3842690720u }, + { 3300994644u, 1315499519u, 2446760804u } + }, + { + { 2742149264u, 1410604392u, 3032350755u }, + { 3774935330u, 2742149264u, 597633965u }, + { 4085935803u, 3774935330u, 3952463556u } + }, + { + { 3878579563u, 845297523u, 1721916511u }, + { 2077922420u, 3878579563u, 
3651360351u }, + { 2177255734u, 2077922420u, 3791239282u } + }, + { + { 1570315355u, 4252790045u, 3522351060u }, + { 2324624266u, 1570315355u, 3594939336u }, + { 1725087354u, 2324624266u, 1338343327u } + }, + { + { 2305761589u, 381933244u, 3663579047u }, + { 1355307047u, 2305761589u, 313617972u }, + { 992174375u, 1355307047u, 3881593435u } + }, + { + { 1667857811u, 1564715297u, 2263851601u }, + { 3791771273u, 1667857811u, 4196134923u }, + { 3347975047u, 3791771273u, 615040705u } + }, + { + { 4093947334u, 3454015638u, 2815567716u }, + { 4261953004u, 4093947334u, 3973733876u }, + { 2979573134u, 4261953004u, 3757047667u } + }, + { + { 250120061u, 570149551u, 1513430926u }, + { 3178644752u, 250120061u, 1701869032u }, + { 4172515680u, 3178644752u, 4213855850u } + }, + { + { 4158106802u, 3062358456u, 1815738463u }, + { 1379176112u, 4158106802u, 3926509890u }, + { 2842564878u, 1379176112u, 2852219546u } + }, + { + { 931848746u, 256263523u, 2633569246u }, + { 3284646837u, 931848746u, 2567084715u }, + { 415258465u, 3284646837u, 2017565947u } + }, + { + { 1648005210u, 1032291296u, 3987397422u }, + { 1831496020u, 1648005210u, 2829448427u }, + { 1821082272u, 1831496020u, 2917140265u } + }, + { + { 4161327077u, 489964129u, 3870847744u }, + { 1669447863u, 4161327077u, 4292947198u }, + { 1522417114u, 1669447863u, 2652286672u } + }, + { + { 1270934555u, 3136631324u, 505612043u }, + { 2981474723u, 1270934555u, 2528619024u }, + { 625182639u, 2981474723u, 1008985039u } + }, + { + { 280996820u, 143706137u, 3013099060u }, + { 1797675893u, 280996820u, 3743985508u }, + { 1123794455u, 1797675893u, 2460119169u } + }, + { + { 919218027u, 4154920441u, 1125672685u }, + { 3933041881u, 919218027u, 474242849u }, + { 564891116u, 3933041881u, 2263904321u } + }, + { + { 2920112852u, 1965329198u, 1177141043u }, + { 2135250851u, 2920112852u, 969184056u }, + { 296035385u, 2135250851u, 4267827987u } + }, + { + { 1481142942u, 4120754772u, 1088557292u }, + { 265491023u, 1481142942u, 2860005744u }, + { 301796252u, 265491023u, 1935975979u } + }, + { + { 2111859033u, 2813610100u, 1001476468u }, + { 73849832u, 2111859033u, 3980799998u }, + { 3330206241u, 73849832u, 1933943506u } + }, + { + { 1781286360u, 3661231931u, 3509383709u }, + { 2753158871u, 1781286360u, 3119883109u }, + { 3576525143u, 2753158871u, 551079002u } + }, + { + { 1185024844u, 587779104u, 1004942725u }, + { 3763632860u, 1185024844u, 947424568u }, + { 3811666068u, 3763632860u, 2352253462u } + }, + { + { 1310227170u, 218138208u, 3172947233u }, + { 766129426u, 1310227170u, 1808643264u }, + { 2226659371u, 766129426u, 3853798112u } + }, + { + { 2230902378u, 4243560874u, 2491962392u }, + { 3836629116u, 2230902378u, 3637515403u }, + { 2846140932u, 3836629116u, 3083355464u } + }, + { + { 999448569u, 1464488480u, 3344426626u }, + { 946166795u, 999448569u, 340856814u }, + { 3686999436u, 946166795u, 3231079441u } + }, + { + { 1226155368u, 3477563770u, 550006884u }, + { 2378667355u, 1226155368u, 1493409040u }, + { 260364836u, 2378667355u, 4133888397u } + }, + { + { 1277901832u, 310796286u, 2818511068u }, + { 3088910653u, 1277901832u, 3303406025u }, + { 2507911914u, 3088910653u, 3712928074u } + }, + { + { 481918378u, 339570348u, 1728801469u }, + { 1623163429u, 481918378u, 2209094694u }, + { 3146982514u, 1623163429u, 508445538u } + }, + { + { 3138921230u, 2381863183u, 1992357430u }, + { 1024510915u, 3138921230u, 2122851650u }, + { 1453455184u, 1024510915u, 941946604u } + }, + { + { 2465372719u, 1391015357u, 3328905025u }, + { 1821933605u, 2465372719u, 1343489680u }, + { 
3648970313u, 1821933605u, 1816599716u } + }, + { + { 118634664u, 3358712512u, 2492792220u }, + { 348833376u, 118634664u, 2495544591u }, + { 3235582254u, 348833376u, 4043157504u } + }, + { + { 2303067090u, 3371139074u, 1967771133u }, + { 598630070u, 2303067090u, 1819012637u }, + { 2049250561u, 598630070u, 4093044926u } + }, + { + { 3035321857u, 3971176093u, 226779704u }, + { 3361614254u, 3035321857u, 2807125404u }, + { 326640887u, 3361614254u, 3147308542u } + }, + { + { 1774298149u, 4179629947u, 3145006948u }, + { 1688753503u, 1774298149u, 94869516u }, + { 2327946901u, 1688753503u, 2786835219u } + } + }; + +#endif // ifndef __CUDACC_RTC__ + +/*Base matrices to power (2 to the power 76) to power 2 to power n + 1u, n the first array index, from 0..63*/ + +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM1SubSeq[56][3][3] = { + { + { 82758667u, 1871391091u, 4127413238u }, + { 3672831523u, 69195019u, 1871391091u }, + { 3672091415u, 3528743235u, 69195019u } + }, + { + { 3361372532u, 2329303404u, 99651939u }, + { 2008671965u, 2931758910u, 2329303404u }, + { 1113529483u, 2374097189u, 2931758910u } + }, + { + { 1831590873u, 1588259595u, 1314332382u }, + { 2385989343u, 2508077280u, 1588259595u }, + { 1787615788u, 661437137u, 2508077280u } + }, + { + { 2326052247u, 4183591379u, 4049009082u }, + { 2604529491u, 1453913233u, 4183591379u }, + { 2311925423u, 1805360390u, 1453913233u } + }, + { + { 3956367490u, 604461629u, 1257432102u }, + { 794711716u, 1155867175u, 604461629u }, + { 1777070788u, 429445904u, 1155867175u } + }, + { + { 1686241617u, 1257046062u, 1427609439u }, + { 490376081u, 387798431u, 1257046062u }, + { 235551485u, 1312672615u, 387798431u } + }, + { + { 2362447880u, 3445363024u, 3160262066u }, + { 2426867845u, 4194339866u, 3445363024u }, + { 1046144413u, 4177893681u, 4194339866u } + }, + { + { 4251175413u, 3559576374u, 3107663662u }, + { 697539134u, 1909472435u, 3559576374u }, + { 280754246u, 375835695u, 1909472435u } + }, + { + { 1099512970u, 712404985u, 1571467521u }, + { 546519870u, 1135109300u, 712404985u }, + { 3325312332u, 2352874613u, 1135109300u } + }, + { + { 1945425936u, 1653045514u, 381988982u }, + { 3733376326u, 414410025u, 1653045514u }, + { 1181583679u, 1185848176u, 414410025u } + }, + { + { 2526336124u, 3019211015u, 4215964965u }, + { 2683163472u, 4188191530u, 3019211015u }, + { 2964651598u, 293801056u, 4188191530u } + }, + { + { 1444052678u, 2253324417u, 39719589u }, + { 1880267534u, 2391992038u, 2253324417u }, + { 987740265u, 3691889508u, 2391992038u } + }, + { + { 166599066u, 2335494420u, 1232261118u }, + { 2227597731u, 2570600780u, 2335494420u }, + { 2700034538u, 3460843234u, 2570600780u } + }, + { + { 2511338360u, 1188954576u, 1251401239u }, + { 2511664974u, 292276982u, 1188954576u }, + { 697844082u, 3093661552u, 292276982u } + }, + { + { 3624650744u, 51993077u, 3540268009u }, + { 3252828938u, 3710319575u, 51993077u }, + { 2858628849u, 3910069381u, 3710319575u } + }, + { + { 655966702u, 754002362u, 1646581402u }, + { 1958331075u, 475572423u, 754002362u }, + { 3248619000u, 3228514800u, 475572423u } + }, + { + { 2760311307u, 4166372813u, 741596417u }, + { 2282679206u, 3090782630u, 4166372813u }, + { 3242468721u, 1628442374u, 3090782630u } + }, + { + { 4265279407u, 3532111852u, 1754687396u }, + { 500404765u, 2603727025u, 3532111852u }, + { 1428367254u, 3149485478u, 2603727025u } + }, + { + { 2873769531u, 2081104178u, 596284397u }, + { 4153800443u, 1261269623u, 2081104178u }, + { 3967600061u, 1830023157u, 1261269623u } + }, + { + { 278611533u, 2229285304u, 
3443204327u }, + { 3110641420u, 77498444u, 2229285304u }, + { 3904070810u, 1070507239u, 77498444u } + }, + { + { 544639534u, 568528663u, 2177189807u }, + { 2475829068u, 121482268u, 568528663u }, + { 876978915u, 3116647617u, 121482268u } + }, + { + { 1547862823u, 2404658587u, 4191448009u }, + { 2158188804u, 2976916793u, 2404658587u }, + { 168571747u, 1691884706u, 2976916793u } + }, + { + { 3208213311u, 4212638780u, 3235157352u }, + { 671148556u, 2951207765u, 4212638780u }, + { 2075145516u, 2395485231u, 2951207765u } + }, + { + { 4080517315u, 2133433101u, 4043998180u }, + { 2044221845u, 867670560u, 2133433101u }, + { 834432416u, 3613001199u, 867670560u } + }, + { + { 4102885735u, 1319434267u, 2678775073u }, + { 740092580u, 607380970u, 1319434267u }, + { 2198271844u, 2610193258u, 607380970u } + }, + { + { 1165218048u, 1317690360u, 1189150958u }, + { 399240205u, 2507168618u, 1317690360u }, + { 2988334517u, 2687593413u, 2507168618u } + }, + { + { 1028861702u, 4082006648u, 338232527u }, + { 1888486946u, 1842080991u, 4082006648u }, + { 3903826366u, 3109935091u, 1842080991u } + }, + { + { 614134826u, 2261996505u, 2888080641u }, + { 710199359u, 2773979788u, 2261996505u }, + { 1144301620u, 2554371815u, 2773979788u } + }, + { + { 4056173823u, 1285620078u, 357420018u }, + { 2423072612u, 2309408315u, 1285620078u }, + { 1533175115u, 2760088020u, 2309408315u } + }, + { + { 4264130267u, 815015434u, 3142242173u }, + { 180649975u, 2500813569u, 815015434u }, + { 3378723563u, 829683767u, 2500813569u } + }, + { + { 4174387531u, 1030729435u, 2812778314u }, + { 1752988797u, 4044178729u, 1030729435u }, + { 467969301u, 554748104u, 4044178729u } + }, + { + { 1348429235u, 2928743274u, 3776082629u }, + { 3607529209u, 3069812185u, 2928743274u }, + { 2542432347u, 3208181168u, 3069812185u } + }, + { + { 4064845753u, 668285756u, 3816217625u }, + { 3713143233u, 1380634204u, 668285756u }, + { 3533700508u, 1192551435u, 1380634204u } + }, + { + { 1515684518u, 1706771705u, 728123349u }, + { 3174850469u, 2057456462u, 1706771705u }, + { 3410402985u, 2897339640u, 2057456462u } + }, + { + { 3082272717u, 531091457u, 1390161328u }, + { 3895139973u, 2171402857u, 531091457u }, + { 4030688141u, 3049703400u, 2171402857u } + }, + { + { 1241147206u, 3193892819u, 1244284192u }, + { 65180262u, 4065669017u, 3193892819u }, + { 1484817937u, 3661081858u, 4065669017u } + }, + { + { 1438760812u, 3491341751u, 3414470157u }, + { 2805337292u, 272266053u, 3491341751u }, + { 824109230u, 3202556526u, 272266053u } + }, + { + { 135412706u, 3627115412u, 2345042216u }, + { 1565169824u, 2166856449u, 3627115412u }, + { 1026946745u, 3467845248u, 2166856449u } + }, + { + { 1889419951u, 3256876154u, 1240505488u }, + { 1254783743u, 989966800u, 3256876154u }, + { 1995297400u, 3692472918u, 989966800u } + }, + { + { 3206226875u, 285700890u, 496017472u }, + { 2515316194u, 2129675196u, 285700890u }, + { 1863853990u, 2673457552u, 2129675196u } + }, + { + { 4163770641u, 255160418u, 772100749u }, + { 1987092456u, 3237660221u, 255160418u }, + { 1394381051u, 4216039401u, 3237660221u } + }, + { + { 2133915627u, 2713747584u, 627765421u }, + { 2300605925u, 35690583u, 2713747584u }, + { 2918902946u, 2638220304u, 35690583u } + }, + { + { 2587549655u, 998684270u, 4292130625u }, + { 1791772791u, 2820705344u, 998684270u }, + { 124590158u, 3831143549u, 2820705344u } + }, + { + { 978482299u, 3200877282u, 497605289u }, + { 3717741518u, 3737164414u, 3200877282u }, + { 4046686626u, 861393946u, 3737164414u } + }, + { + { 2665561897u, 300934584u, 3179822945u }, + { 893043137u, 
2031413512u, 300934584u }, + { 3806926970u, 2413249929u, 2031413512u } + }, + { + { 1417581911u, 3071835354u, 2575196237u }, + { 4101127251u, 1375339216u, 3071835354u }, + { 847617977u, 3632503316u, 1375339216u } + }, + { + { 2747488994u, 3296604805u, 898095468u }, + { 1742777145u, 219265369u, 3296604805u }, + { 823714885u, 667779292u, 219265369u } + }, + { + { 2640209692u, 3040506537u, 3626115220u }, + { 161827078u, 852668118u, 3040506537u }, + { 3856381322u, 3360242076u, 852668118u } + }, + { + { 3734246393u, 4151553160u, 4177051283u }, + { 266522866u, 1731798531u, 4151553160u }, + { 632196679u, 3864297722u, 1731798531u } + }, + { + { 1694175127u, 1087914338u, 2384195794u }, + { 2764925057u, 505782858u, 1087914338u }, + { 3235634082u, 807915248u, 505782858u } + }, + { + { 2402749950u, 2353776151u, 75909174u }, + { 890570951u, 1752665661u, 2353776151u }, + { 3120241607u, 3862435696u, 1752665661u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM1SubSeqHost[56][3][3] = { + { + { 82758667u, 1871391091u, 4127413238u }, + { 3672831523u, 69195019u, 1871391091u }, + { 3672091415u, 3528743235u, 69195019u } + }, + { + { 3361372532u, 2329303404u, 99651939u }, + { 2008671965u, 2931758910u, 2329303404u }, + { 1113529483u, 2374097189u, 2931758910u } + }, + { + { 1831590873u, 1588259595u, 1314332382u }, + { 2385989343u, 2508077280u, 1588259595u }, + { 1787615788u, 661437137u, 2508077280u } + }, + { + { 2326052247u, 4183591379u, 4049009082u }, + { 2604529491u, 1453913233u, 4183591379u }, + { 2311925423u, 1805360390u, 1453913233u } + }, + { + { 3956367490u, 604461629u, 1257432102u }, + { 794711716u, 1155867175u, 604461629u }, + { 1777070788u, 429445904u, 1155867175u } + }, + { + { 1686241617u, 1257046062u, 1427609439u }, + { 490376081u, 387798431u, 1257046062u }, + { 235551485u, 1312672615u, 387798431u } + }, + { + { 2362447880u, 3445363024u, 3160262066u }, + { 2426867845u, 4194339866u, 3445363024u }, + { 1046144413u, 4177893681u, 4194339866u } + }, + { + { 4251175413u, 3559576374u, 3107663662u }, + { 697539134u, 1909472435u, 3559576374u }, + { 280754246u, 375835695u, 1909472435u } + }, + { + { 1099512970u, 712404985u, 1571467521u }, + { 546519870u, 1135109300u, 712404985u }, + { 3325312332u, 2352874613u, 1135109300u } + }, + { + { 1945425936u, 1653045514u, 381988982u }, + { 3733376326u, 414410025u, 1653045514u }, + { 1181583679u, 1185848176u, 414410025u } + }, + { + { 2526336124u, 3019211015u, 4215964965u }, + { 2683163472u, 4188191530u, 3019211015u }, + { 2964651598u, 293801056u, 4188191530u } + }, + { + { 1444052678u, 2253324417u, 39719589u }, + { 1880267534u, 2391992038u, 2253324417u }, + { 987740265u, 3691889508u, 2391992038u } + }, + { + { 166599066u, 2335494420u, 1232261118u }, + { 2227597731u, 2570600780u, 2335494420u }, + { 2700034538u, 3460843234u, 2570600780u } + }, + { + { 2511338360u, 1188954576u, 1251401239u }, + { 2511664974u, 292276982u, 1188954576u }, + { 697844082u, 3093661552u, 292276982u } + }, + { + { 3624650744u, 51993077u, 3540268009u }, + { 3252828938u, 3710319575u, 51993077u }, + { 2858628849u, 3910069381u, 3710319575u } + }, + { + { 655966702u, 754002362u, 1646581402u }, + { 1958331075u, 475572423u, 754002362u }, + { 3248619000u, 3228514800u, 475572423u } + }, + { + { 2760311307u, 4166372813u, 741596417u }, + { 2282679206u, 3090782630u, 4166372813u }, + { 3242468721u, 1628442374u, 3090782630u } + }, + { + { 4265279407u, 3532111852u, 1754687396u }, + { 500404765u, 2603727025u, 3532111852u }, + { 1428367254u, 3149485478u, 
2603727025u } + }, + { + { 2873769531u, 2081104178u, 596284397u }, + { 4153800443u, 1261269623u, 2081104178u }, + { 3967600061u, 1830023157u, 1261269623u } + }, + { + { 278611533u, 2229285304u, 3443204327u }, + { 3110641420u, 77498444u, 2229285304u }, + { 3904070810u, 1070507239u, 77498444u } + }, + { + { 544639534u, 568528663u, 2177189807u }, + { 2475829068u, 121482268u, 568528663u }, + { 876978915u, 3116647617u, 121482268u } + }, + { + { 1547862823u, 2404658587u, 4191448009u }, + { 2158188804u, 2976916793u, 2404658587u }, + { 168571747u, 1691884706u, 2976916793u } + }, + { + { 3208213311u, 4212638780u, 3235157352u }, + { 671148556u, 2951207765u, 4212638780u }, + { 2075145516u, 2395485231u, 2951207765u } + }, + { + { 4080517315u, 2133433101u, 4043998180u }, + { 2044221845u, 867670560u, 2133433101u }, + { 834432416u, 3613001199u, 867670560u } + }, + { + { 4102885735u, 1319434267u, 2678775073u }, + { 740092580u, 607380970u, 1319434267u }, + { 2198271844u, 2610193258u, 607380970u } + }, + { + { 1165218048u, 1317690360u, 1189150958u }, + { 399240205u, 2507168618u, 1317690360u }, + { 2988334517u, 2687593413u, 2507168618u } + }, + { + { 1028861702u, 4082006648u, 338232527u }, + { 1888486946u, 1842080991u, 4082006648u }, + { 3903826366u, 3109935091u, 1842080991u } + }, + { + { 614134826u, 2261996505u, 2888080641u }, + { 710199359u, 2773979788u, 2261996505u }, + { 1144301620u, 2554371815u, 2773979788u } + }, + { + { 4056173823u, 1285620078u, 357420018u }, + { 2423072612u, 2309408315u, 1285620078u }, + { 1533175115u, 2760088020u, 2309408315u } + }, + { + { 4264130267u, 815015434u, 3142242173u }, + { 180649975u, 2500813569u, 815015434u }, + { 3378723563u, 829683767u, 2500813569u } + }, + { + { 4174387531u, 1030729435u, 2812778314u }, + { 1752988797u, 4044178729u, 1030729435u }, + { 467969301u, 554748104u, 4044178729u } + }, + { + { 1348429235u, 2928743274u, 3776082629u }, + { 3607529209u, 3069812185u, 2928743274u }, + { 2542432347u, 3208181168u, 3069812185u } + }, + { + { 4064845753u, 668285756u, 3816217625u }, + { 3713143233u, 1380634204u, 668285756u }, + { 3533700508u, 1192551435u, 1380634204u } + }, + { + { 1515684518u, 1706771705u, 728123349u }, + { 3174850469u, 2057456462u, 1706771705u }, + { 3410402985u, 2897339640u, 2057456462u } + }, + { + { 3082272717u, 531091457u, 1390161328u }, + { 3895139973u, 2171402857u, 531091457u }, + { 4030688141u, 3049703400u, 2171402857u } + }, + { + { 1241147206u, 3193892819u, 1244284192u }, + { 65180262u, 4065669017u, 3193892819u }, + { 1484817937u, 3661081858u, 4065669017u } + }, + { + { 1438760812u, 3491341751u, 3414470157u }, + { 2805337292u, 272266053u, 3491341751u }, + { 824109230u, 3202556526u, 272266053u } + }, + { + { 135412706u, 3627115412u, 2345042216u }, + { 1565169824u, 2166856449u, 3627115412u }, + { 1026946745u, 3467845248u, 2166856449u } + }, + { + { 1889419951u, 3256876154u, 1240505488u }, + { 1254783743u, 989966800u, 3256876154u }, + { 1995297400u, 3692472918u, 989966800u } + }, + { + { 3206226875u, 285700890u, 496017472u }, + { 2515316194u, 2129675196u, 285700890u }, + { 1863853990u, 2673457552u, 2129675196u } + }, + { + { 4163770641u, 255160418u, 772100749u }, + { 1987092456u, 3237660221u, 255160418u }, + { 1394381051u, 4216039401u, 3237660221u } + }, + { + { 2133915627u, 2713747584u, 627765421u }, + { 2300605925u, 35690583u, 2713747584u }, + { 2918902946u, 2638220304u, 35690583u } + }, + { + { 2587549655u, 998684270u, 4292130625u }, + { 1791772791u, 2820705344u, 998684270u }, + { 124590158u, 3831143549u, 2820705344u } + }, + { + { 
978482299u, 3200877282u, 497605289u }, + { 3717741518u, 3737164414u, 3200877282u }, + { 4046686626u, 861393946u, 3737164414u } + }, + { + { 2665561897u, 300934584u, 3179822945u }, + { 893043137u, 2031413512u, 300934584u }, + { 3806926970u, 2413249929u, 2031413512u } + }, + { + { 1417581911u, 3071835354u, 2575196237u }, + { 4101127251u, 1375339216u, 3071835354u }, + { 847617977u, 3632503316u, 1375339216u } + }, + { + { 2747488994u, 3296604805u, 898095468u }, + { 1742777145u, 219265369u, 3296604805u }, + { 823714885u, 667779292u, 219265369u } + }, + { + { 2640209692u, 3040506537u, 3626115220u }, + { 161827078u, 852668118u, 3040506537u }, + { 3856381322u, 3360242076u, 852668118u } + }, + { + { 3734246393u, 4151553160u, 4177051283u }, + { 266522866u, 1731798531u, 4151553160u }, + { 632196679u, 3864297722u, 1731798531u } + }, + { + { 1694175127u, 1087914338u, 2384195794u }, + { 2764925057u, 505782858u, 1087914338u }, + { 3235634082u, 807915248u, 505782858u } + }, + { + { 2402749950u, 2353776151u, 75909174u }, + { 890570951u, 1752665661u, 2353776151u }, + { 3120241607u, 3862435696u, 1752665661u } + } + }; +#endif // #ifndef __CUDACC_RTC__ + +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM2SubSeq[56][3][3] = { + { + { 1511326704u, 3759209742u, 1610795712u }, + { 4292754251u, 1511326704u, 3889917532u }, + { 3859662829u, 4292754251u, 3708466080u } + }, + { + { 972103006u, 964807713u, 878035866u }, + { 4248550197u, 972103006u, 1926628839u }, + { 1448629089u, 4248550197u, 3196114006u } + }, + { + { 3497384788u, 3174249442u, 3182508868u }, + { 3864816447u, 3497384788u, 3038399593u }, + { 2546884738u, 3864816447u, 2980208068u } + }, + { + { 1776335558u, 1189944887u, 4095757548u }, + { 3813600746u, 1776335558u, 789475914u }, + { 4119698302u, 3813600746u, 2145357457u } + }, + { + { 4022832294u, 4130146837u, 1942923647u }, + { 1675130777u, 4022832294u, 916677004u }, + { 4089786548u, 1675130777u, 116540512u } + }, + { + { 165639584u, 1205513289u, 2037453462u }, + { 1444587280u, 165639584u, 161923120u }, + { 2617085459u, 1444587280u, 2006913311u } + }, + { + { 3458099202u, 3062421748u, 4052486999u }, + { 1064270720u, 3458099202u, 230768332u }, + { 4056228301u, 1064270720u, 2219267779u } + }, + { + { 296275263u, 3452455838u, 2081462173u }, + { 1789143993u, 296275263u, 3463234943u }, + { 2097389984u, 1789143993u, 3447191459u } + }, + { + { 2828288883u, 3866690251u, 410553827u }, + { 1587005542u, 2828288883u, 1469478670u }, + { 2766486018u, 1587005542u, 2627363449u } + }, + { + { 3288027530u, 412403981u, 2458742268u }, + { 4267121909u, 3288027530u, 138566505u }, + { 420803572u, 4267121909u, 4094554844u } + }, + { + { 3844599430u, 2430152838u, 3283485436u }, + { 2486244684u, 3844599430u, 4252427633u }, + { 3560842909u, 2486244684u, 3960267499u } + }, + { + { 67933059u, 1294996291u, 2657888382u }, + { 513233413u, 67933059u, 1379805031u }, + { 44564058u, 513233413u, 86971645u } + }, + { + { 2732588524u, 1866530072u, 818237694u }, + { 2540507736u, 2732588524u, 3257104212u }, + { 1164400003u, 2540507736u, 1124501551u } + }, + { + { 4199239222u, 3155848463u, 2121388468u }, + { 1135554501u, 4199239222u, 2056492193u }, + { 3251740389u, 1135554501u, 2343537248u } + }, + { + { 550710036u, 500329021u, 1075236085u }, + { 356444753u, 550710036u, 1634965500u }, + { 58733535u, 356444753u, 1261552815u } + }, + { + { 708689546u, 419139045u, 2012018174u }, + { 706488081u, 708689546u, 1113760995u }, + { 585555005u, 706488081u, 76092226u } + }, + { + { 1293182265u, 3168473803u, 366230236u }, + { 3319068849u, 
1293182265u, 1085259665u }, + { 1675229290u, 3319068849u, 3912300371u } + }, + { + { 3186089068u, 4188864734u, 1211781402u }, + { 756122322u, 3186089068u, 578262892u }, + { 2518961174u, 756122322u, 1658665581u } + }, + { + { 1347291439u, 2050427676u, 736113023u }, + { 4102191254u, 1347291439u, 878627148u }, + { 1293500383u, 4102191254u, 745646810u } + }, + { + { 4196897331u, 3436564969u, 1900167098u }, + { 3108887846u, 4196897331u, 2697923227u }, + { 1405263476u, 3108887846u, 314631094u } + }, + { + { 958383622u, 3694638688u, 1150087061u }, + { 3770009830u, 958383622u, 793326651u }, + { 533700213u, 3770009830u, 1513734026u } + }, + { + { 4119603367u, 3479396923u, 3534176399u }, + { 3765397477u, 4119603367u, 1458031003u }, + { 3380901602u, 3765397477u, 2684083587u } + }, + { + { 980937351u, 2094378936u, 448446028u }, + { 1421333909u, 980937351u, 3405683645u }, + { 323724368u, 1421333909u, 338680738u } + }, + { + { 2942968846u, 4293637338u, 3549906544u }, + { 527851489u, 2942968846u, 3852871282u }, + { 4209198933u, 527851489u, 1091268872u } + }, + { + { 1975983015u, 2092556693u, 611187071u }, + { 3982652344u, 1975983015u, 3001736262u }, + { 2055073597u, 3982652344u, 1875181995u } + }, + { + { 2970221269u, 880904779u, 2447465272u }, + { 2888742196u, 2970221269u, 3521651749u }, + { 3019977656u, 2888742196u, 2712717326u } + }, + { + { 419134859u, 2976059897u, 747864206u }, + { 4101695717u, 419134859u, 4264593116u }, + { 2657991148u, 4101695717u, 2542621682u } + }, + { + { 4043135299u, 1612983166u, 1149778656u }, + { 1267010518u, 4043135299u, 3496325546u }, + { 3094232897u, 1267010518u, 2949176293u } + }, + { + { 3949395794u, 1774568686u, 2123036003u }, + { 2182983404u, 3949395794u, 2355671350u }, + { 2820933455u, 2182983404u, 513963325u } + }, + { + { 3046911698u, 2576744453u, 2492729814u }, + { 4277866093u, 3046911698u, 3146977604u }, + { 2249371766u, 4277866093u, 3622293976u } + }, + { + { 1391529818u, 423458502u, 2587125255u }, + { 3536237833u, 1391529818u, 985347517u }, + { 157623850u, 3536237833u, 1015566287u } + }, + { + { 48329260u, 2599277669u, 821961664u }, + { 902187690u, 48329260u, 1716556555u }, + { 4019658974u, 902187690u, 950730510u } + }, + { + { 1318489562u, 1530977112u, 3713577419u }, + { 4270158447u, 1318489562u, 1654940598u }, + { 2679964938u, 4270158447u, 1337075195u } + }, + { + { 770600793u, 3249576224u, 3578552768u }, + { 2710443459u, 770600793u, 2990852339u }, + { 3098163705u, 2710443459u, 522138188u } + }, + { + { 2803285489u, 1922250286u, 3164022812u }, + { 477609731u, 2803285489u, 2140252218u }, + { 2252852611u, 477609731u, 3058519788u } + }, + { + { 208329741u, 3633562083u, 3548346666u }, + { 3892091460u, 208329741u, 516833304u }, + { 3440632377u, 3892091460u, 1638833719u } + }, + { + { 1816075033u, 3570111203u, 959489356u }, + { 3482051486u, 1816075033u, 861657108u }, + { 3119495098u, 3482051486u, 2576849579u } + }, + { + { 4240216888u, 2891584407u, 2102314945u }, + { 4064489450u, 4240216888u, 1427441010u }, + { 2441164913u, 4064489450u, 3558527186u } + }, + { + { 2918371295u, 65155283u, 3469357011u }, + { 3579773554u, 2918371295u, 3494391959u }, + { 3266584309u, 3579773554u, 3837485479u } + }, + { + { 2959420453u, 1365016881u, 4082486022u }, + { 236489012u, 2959420453u, 3802558529u }, + { 2687043642u, 236489012u, 2547086826u } + }, + { + { 4185325422u, 2762854843u, 3200044912u }, + { 3664909559u, 4185325422u, 3543921700u }, + { 4240262918u, 3664909559u, 2853212443u } + }, + { + { 2618500928u, 4237264351u, 1470046497u }, + { 1893990098u, 2618500928u, 2982567031u }, 
+ { 3017062825u, 1893990098u, 3195556801u } + }, + { + { 1868464655u, 3407681142u, 1652841784u }, + { 1678569574u, 1868464655u, 4162480901u }, + { 1477016185u, 1678569574u, 4145063890u } + }, + { + { 792188465u, 4251338402u, 2219407026u }, + { 3840340879u, 792188465u, 3493367465u }, + { 2979958414u, 3840340879u, 2338974139u } + }, + { + { 478845700u, 2378167062u, 882114621u }, + { 1674533845u, 478845700u, 3572905305u }, + { 3571222880u, 1674533845u, 1242316901u } + }, + { + { 2636090868u, 1972761498u, 71690719u }, + { 1228103463u, 2636090868u, 1280685025u }, + { 3741735502u, 1228103463u, 994061750u } + }, + { + { 1156725261u, 1100755307u, 221922891u }, + { 2892200461u, 1156725261u, 1505716533u }, + { 2287613563u, 2892200461u, 3689457190u } + }, + { + { 1387244644u, 3135090808u, 1243609165u }, + { 1724967466u, 1387244644u, 3296353235u }, + { 1064364031u, 1724967466u, 2107521044u } + }, + { + { 2822471992u, 2034317853u, 2071407475u }, + { 170903528u, 2822471992u, 1322162887u }, + { 2524982332u, 170903528u, 2656231333u } + }, + { + { 3653936868u, 3893194049u, 2484299328u }, + { 1313746234u, 3653936868u, 1705346273u }, + { 1397638018u, 1313746234u, 4015529545u } + }, + { + { 4129760842u, 1671665759u, 1677834656u }, + { 3200005334u, 4129760842u, 3486207172u }, + { 2850728736u, 3200005334u, 3076201597u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM2SubSeqHost[56][3][3] = { + { + { 1511326704u, 3759209742u, 1610795712u }, + { 4292754251u, 1511326704u, 3889917532u }, + { 3859662829u, 4292754251u, 3708466080u } + }, + { + { 972103006u, 964807713u, 878035866u }, + { 4248550197u, 972103006u, 1926628839u }, + { 1448629089u, 4248550197u, 3196114006u } + }, + { + { 3497384788u, 3174249442u, 3182508868u }, + { 3864816447u, 3497384788u, 3038399593u }, + { 2546884738u, 3864816447u, 2980208068u } + }, + { + { 1776335558u, 1189944887u, 4095757548u }, + { 3813600746u, 1776335558u, 789475914u }, + { 4119698302u, 3813600746u, 2145357457u } + }, + { + { 4022832294u, 4130146837u, 1942923647u }, + { 1675130777u, 4022832294u, 916677004u }, + { 4089786548u, 1675130777u, 116540512u } + }, + { + { 165639584u, 1205513289u, 2037453462u }, + { 1444587280u, 165639584u, 161923120u }, + { 2617085459u, 1444587280u, 2006913311u } + }, + { + { 3458099202u, 3062421748u, 4052486999u }, + { 1064270720u, 3458099202u, 230768332u }, + { 4056228301u, 1064270720u, 2219267779u } + }, + { + { 296275263u, 3452455838u, 2081462173u }, + { 1789143993u, 296275263u, 3463234943u }, + { 2097389984u, 1789143993u, 3447191459u } + }, + { + { 2828288883u, 3866690251u, 410553827u }, + { 1587005542u, 2828288883u, 1469478670u }, + { 2766486018u, 1587005542u, 2627363449u } + }, + { + { 3288027530u, 412403981u, 2458742268u }, + { 4267121909u, 3288027530u, 138566505u }, + { 420803572u, 4267121909u, 4094554844u } + }, + { + { 3844599430u, 2430152838u, 3283485436u }, + { 2486244684u, 3844599430u, 4252427633u }, + { 3560842909u, 2486244684u, 3960267499u } + }, + { + { 67933059u, 1294996291u, 2657888382u }, + { 513233413u, 67933059u, 1379805031u }, + { 44564058u, 513233413u, 86971645u } + }, + { + { 2732588524u, 1866530072u, 818237694u }, + { 2540507736u, 2732588524u, 3257104212u }, + { 1164400003u, 2540507736u, 1124501551u } + }, + { + { 4199239222u, 3155848463u, 2121388468u }, + { 1135554501u, 4199239222u, 2056492193u }, + { 3251740389u, 1135554501u, 2343537248u } + }, + { + { 550710036u, 500329021u, 1075236085u }, + { 356444753u, 550710036u, 1634965500u }, + { 58733535u, 356444753u, 1261552815u } + }, 
+ { + { 708689546u, 419139045u, 2012018174u }, + { 706488081u, 708689546u, 1113760995u }, + { 585555005u, 706488081u, 76092226u } + }, + { + { 1293182265u, 3168473803u, 366230236u }, + { 3319068849u, 1293182265u, 1085259665u }, + { 1675229290u, 3319068849u, 3912300371u } + }, + { + { 3186089068u, 4188864734u, 1211781402u }, + { 756122322u, 3186089068u, 578262892u }, + { 2518961174u, 756122322u, 1658665581u } + }, + { + { 1347291439u, 2050427676u, 736113023u }, + { 4102191254u, 1347291439u, 878627148u }, + { 1293500383u, 4102191254u, 745646810u } + }, + { + { 4196897331u, 3436564969u, 1900167098u }, + { 3108887846u, 4196897331u, 2697923227u }, + { 1405263476u, 3108887846u, 314631094u } + }, + { + { 958383622u, 3694638688u, 1150087061u }, + { 3770009830u, 958383622u, 793326651u }, + { 533700213u, 3770009830u, 1513734026u } + }, + { + { 4119603367u, 3479396923u, 3534176399u }, + { 3765397477u, 4119603367u, 1458031003u }, + { 3380901602u, 3765397477u, 2684083587u } + }, + { + { 980937351u, 2094378936u, 448446028u }, + { 1421333909u, 980937351u, 3405683645u }, + { 323724368u, 1421333909u, 338680738u } + }, + { + { 2942968846u, 4293637338u, 3549906544u }, + { 527851489u, 2942968846u, 3852871282u }, + { 4209198933u, 527851489u, 1091268872u } + }, + { + { 1975983015u, 2092556693u, 611187071u }, + { 3982652344u, 1975983015u, 3001736262u }, + { 2055073597u, 3982652344u, 1875181995u } + }, + { + { 2970221269u, 880904779u, 2447465272u }, + { 2888742196u, 2970221269u, 3521651749u }, + { 3019977656u, 2888742196u, 2712717326u } + }, + { + { 419134859u, 2976059897u, 747864206u }, + { 4101695717u, 419134859u, 4264593116u }, + { 2657991148u, 4101695717u, 2542621682u } + }, + { + { 4043135299u, 1612983166u, 1149778656u }, + { 1267010518u, 4043135299u, 3496325546u }, + { 3094232897u, 1267010518u, 2949176293u } + }, + { + { 3949395794u, 1774568686u, 2123036003u }, + { 2182983404u, 3949395794u, 2355671350u }, + { 2820933455u, 2182983404u, 513963325u } + }, + { + { 3046911698u, 2576744453u, 2492729814u }, + { 4277866093u, 3046911698u, 3146977604u }, + { 2249371766u, 4277866093u, 3622293976u } + }, + { + { 1391529818u, 423458502u, 2587125255u }, + { 3536237833u, 1391529818u, 985347517u }, + { 157623850u, 3536237833u, 1015566287u } + }, + { + { 48329260u, 2599277669u, 821961664u }, + { 902187690u, 48329260u, 1716556555u }, + { 4019658974u, 902187690u, 950730510u } + }, + { + { 1318489562u, 1530977112u, 3713577419u }, + { 4270158447u, 1318489562u, 1654940598u }, + { 2679964938u, 4270158447u, 1337075195u } + }, + { + { 770600793u, 3249576224u, 3578552768u }, + { 2710443459u, 770600793u, 2990852339u }, + { 3098163705u, 2710443459u, 522138188u } + }, + { + { 2803285489u, 1922250286u, 3164022812u }, + { 477609731u, 2803285489u, 2140252218u }, + { 2252852611u, 477609731u, 3058519788u } + }, + { + { 208329741u, 3633562083u, 3548346666u }, + { 3892091460u, 208329741u, 516833304u }, + { 3440632377u, 3892091460u, 1638833719u } + }, + { + { 1816075033u, 3570111203u, 959489356u }, + { 3482051486u, 1816075033u, 861657108u }, + { 3119495098u, 3482051486u, 2576849579u } + }, + { + { 4240216888u, 2891584407u, 2102314945u }, + { 4064489450u, 4240216888u, 1427441010u }, + { 2441164913u, 4064489450u, 3558527186u } + }, + { + { 2918371295u, 65155283u, 3469357011u }, + { 3579773554u, 2918371295u, 3494391959u }, + { 3266584309u, 3579773554u, 3837485479u } + }, + { + { 2959420453u, 1365016881u, 4082486022u }, + { 236489012u, 2959420453u, 3802558529u }, + { 2687043642u, 236489012u, 2547086826u } + }, + { + { 4185325422u, 2762854843u, 
3200044912u }, + { 3664909559u, 4185325422u, 3543921700u }, + { 4240262918u, 3664909559u, 2853212443u } + }, + { + { 2618500928u, 4237264351u, 1470046497u }, + { 1893990098u, 2618500928u, 2982567031u }, + { 3017062825u, 1893990098u, 3195556801u } + }, + { + { 1868464655u, 3407681142u, 1652841784u }, + { 1678569574u, 1868464655u, 4162480901u }, + { 1477016185u, 1678569574u, 4145063890u } + }, + { + { 792188465u, 4251338402u, 2219407026u }, + { 3840340879u, 792188465u, 3493367465u }, + { 2979958414u, 3840340879u, 2338974139u } + }, + { + { 478845700u, 2378167062u, 882114621u }, + { 1674533845u, 478845700u, 3572905305u }, + { 3571222880u, 1674533845u, 1242316901u } + }, + { + { 2636090868u, 1972761498u, 71690719u }, + { 1228103463u, 2636090868u, 1280685025u }, + { 3741735502u, 1228103463u, 994061750u } + }, + { + { 1156725261u, 1100755307u, 221922891u }, + { 2892200461u, 1156725261u, 1505716533u }, + { 2287613563u, 2892200461u, 3689457190u } + }, + { + { 1387244644u, 3135090808u, 1243609165u }, + { 1724967466u, 1387244644u, 3296353235u }, + { 1064364031u, 1724967466u, 2107521044u } + }, + { + { 2822471992u, 2034317853u, 2071407475u }, + { 170903528u, 2822471992u, 1322162887u }, + { 2524982332u, 170903528u, 2656231333u } + }, + { + { 3653936868u, 3893194049u, 2484299328u }, + { 1313746234u, 3653936868u, 1705346273u }, + { 1397638018u, 1313746234u, 4015529545u } + }, + { + { 4129760842u, 1671665759u, 1677834656u }, + { 3200005334u, 4129760842u, 3486207172u }, + { 2850728736u, 3200005334u, 3076201597u } + } + }; +#endif // #ifndef __CUDACC_RTC__ + /*Base matrices to power (2 to the power 127) to power 2 to power n+1u, n the first array index, from 0..63*/ + +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM1Seq[64][3][3] = { + { + { 2427906178u, 3580155704u, 949770784u }, + { 226153695u, 1230515664u, 3580155704u }, + { 1988835001u, 986791581u, 1230515664u } + }, + { + { 1774047142u, 3199155377u, 3106427820u }, + { 1901920839u, 4290900039u, 3199155377u }, + { 4178980191u, 280623348u, 4290900039u } + }, + { + { 3567524348u, 1934119675u, 3188270128u }, + { 2997767678u, 826363896u, 1934119675u }, + { 262952343u, 614326610u, 826363896u } + }, + { + { 1625613062u, 4288164505u, 2481284279u }, + { 4273461426u, 1177260757u, 4288164505u }, + { 305959988u, 4017252267u, 1177260757u } + }, + { + { 337929267u, 333342539u, 418300166u }, + { 2944208672u, 379097734u, 333342539u }, + { 2084056909u, 3625475947u, 379097734u } + }, + { + { 1189899255u, 1307754719u, 1214919992u }, + { 3736721708u, 3514751918u, 1307754719u }, + { 732435953u, 2021244538u, 3514751918u } + }, + { + { 4089172695u, 1533534334u, 525643282u }, + { 1497577018u, 1335684482u, 1533534334u }, + { 2079007086u, 3977541427u, 1335684482u } + }, + { + { 3075256652u, 2762754934u, 3846844247u }, + { 3057872364u, 3274545167u, 2762754934u }, + { 4028573983u, 938934351u, 3274545167u } + }, + { + { 2597859300u, 2880151048u, 2523330453u }, + { 1121709186u, 175667448u, 2880151048u }, + { 4182510911u, 1723133625u, 175667448u } + }, + { + { 484148868u, 1404283933u, 2982534313u }, + { 3736767353u, 3179865161u, 1404283933u }, + { 391120388u, 3758716888u, 3179865161u } + }, + { + { 2138867468u, 1128973399u, 2133702321u }, + { 1613561693u, 3622350766u, 1128973399u }, + { 1500151924u, 3759983985u, 3622350766u } + }, + { + { 3027706760u, 3786576552u, 2698781808u }, + { 2810527099u, 90498489u, 3786576552u }, + { 4220122612u, 1855245979u, 90498489u } + }, + { + { 3739389517u, 1110440720u, 917457922u }, + { 2163873618u, 3707591763u, 1110440720u }, + { 
2667061910u, 2533383962u, 3707591763u } + }, + { + { 1545226000u, 1812182123u, 3693349190u }, + { 3422065122u, 3291428549u, 1812182123u }, + { 1193168720u, 2072837757u, 3291428549u } + }, + { + { 3230096243u, 2131723358u, 3262178024u }, + { 2882890127u, 4088518247u, 2131723358u }, + { 3991553306u, 1282224087u, 4088518247u } + }, + { + { 301207261u, 1722796810u, 3697719854u }, + { 3350228505u, 3410986694u, 1722796810u }, + { 3684514720u, 2846958957u, 3410986694u } + }, + { + { 1532963114u, 4236235786u, 3871128158u }, + { 3540401964u, 1285250577u, 4236235786u }, + { 1105070646u, 2764245175u, 1285250577u } + }, + { + { 210906218u, 3068599594u, 3034582784u }, + { 340633153u, 4004365908u, 3068599594u }, + { 4238928187u, 2299166464u, 4004365908u } + }, + { + { 2274701639u, 3955606166u, 3081246407u }, + { 3199954992u, 3948054919u, 3955606166u }, + { 2399101442u, 3438340286u, 3948054919u } + }, + { + { 504137100u, 1182303684u, 201533985u }, + { 4188299661u, 3042453580u, 1182303684u }, + { 2578519273u, 2674782930u, 3042453580u } + }, + { + { 1382964588u, 2578452047u, 3140440866u }, + { 261861891u, 1076783073u, 2578452047u }, + { 1634588989u, 164438428u, 1076783073u } + }, + { + { 2529186343u, 526867394u, 3102803247u }, + { 2687252475u, 2908898908u, 526867394u }, + { 1213100579u, 86050422u, 2908898908u } + }, + { + { 2690118316u, 538108523u, 790337895u }, + { 4193870709u, 1053552056u, 538108523u }, + { 1635227281u, 4002399925u, 1053552056u } + }, + { + { 2123712957u, 4205383007u, 1812304090u }, + { 1095349745u, 166243972u, 4205383007u }, + { 428569070u, 2128782357u, 166243972u } + }, + { + { 1330151766u, 3569679412u, 4107175982u }, + { 3808641551u, 3621125056u, 3569679412u }, + { 4262164578u, 1927692878u, 3621125056u } + }, + { + { 3606295184u, 2442739556u, 3894922338u }, + { 1629626641u, 2729678535u, 2442739556u }, + { 3379124758u, 4279360935u, 2729678535u } + }, + { + { 1052092278u, 4249024666u, 919210106u }, + { 3253349463u, 3629539480u, 4249024666u }, + { 852514024u, 4025926501u, 3629539480u } + }, + { + { 12394571u, 1252747620u, 2133571953u }, + { 4227339509u, 3197545170u, 1252747620u }, + { 1884529704u, 1976203831u, 3197545170u } + }, + { + { 2986331025u, 2671019282u, 2847338542u }, + { 3173738401u, 3542657885u, 2671019282u }, + { 745203060u, 1546667401u, 3542657885u } + }, + { + { 2613012997u, 2311336951u, 2911336433u }, + { 1493974713u, 92565032u, 2311336951u }, + { 2786645250u, 257065974u, 92565032u } + }, + { + { 3424925004u, 2776053372u, 2204068573u }, + { 3770626858u, 2509257810u, 2776053372u }, + { 2979919489u, 1146336783u, 2509257810u } + }, + { + { 1474384834u, 827894421u, 515339473u }, + { 1373055755u, 1949809417u, 827894421u }, + { 3088339524u, 1194193824u, 1949809417u } + }, + { + { 1825805135u, 1289872272u, 3700877161u }, + { 3433422861u, 4062509844u, 1289872272u }, + { 3019008744u, 2060641859u, 4062509844u } + }, + { + { 3842597153u, 4253338264u, 3424495942u }, + { 698444416u, 60268595u, 4253338264u }, + { 4096010585u, 47309624u, 60268595u } + }, + { + { 2662288323u, 2043518992u, 1593435980u }, + { 1330201507u, 3618850300u, 2043518992u }, + { 2538793204u, 271787962u, 3618850300u } + }, + { + { 741020448u, 997594656u, 2398808739u }, + { 1160477043u, 1522130854u, 997594656u }, + { 3036916315u, 2847712653u, 1522130854u } + }, + { + { 2654964886u, 1889728930u, 53329096u }, + { 2042322941u, 1621136330u, 1889728930u }, + { 1553642730u, 784545882u, 1621136330u } + }, + { + { 1715219514u, 2831829177u, 929124824u }, + { 997274536u, 404228189u, 2831829177u }, + { 1386575385u, 4107238699u, 
404228189u } + }, + { + { 3928131551u, 2912523524u, 1840499723u }, + { 4216003022u, 2970489088u, 2912523524u }, + { 1158689953u, 1425511081u, 2970489088u } + }, + { + { 2807004452u, 2510299562u, 271603006u }, + { 2505735035u, 2370490899u, 2510299562u }, + { 10873814u, 2450376936u, 2370490899u } + }, + { + { 2000734342u, 1113679064u, 2502160539u }, + { 1475266926u, 2787925323u, 1113679064u }, + { 1475797635u, 3044470744u, 2787925323u } + }, + { + { 1457157056u, 1252556678u, 3073232607u }, + { 1926798761u, 3639907189u, 1252556678u }, + { 2067740348u, 2256217204u, 3639907189u } + }, + { + { 3740999688u, 1035400458u, 3162437311u }, + { 4126312242u, 686702830u, 1035400458u }, + { 1699805291u, 667792040u, 686702830u } + }, + { + { 2422495016u, 3203768688u, 1858240466u }, + { 848719394u, 4092709154u, 3203768688u }, + { 659945473u, 1863075174u, 4092709154u } + }, + { + { 246817944u, 871751352u, 2834051003u }, + { 3976202597u, 3721214025u, 871751352u }, + { 783929942u, 745295675u, 3721214025u } + }, + { + { 3811740424u, 3603608092u, 2365398362u }, + { 3826150877u, 2906557036u, 3603608092u }, + { 2300510686u, 966815948u, 2906557036u } + }, + { + { 2816329160u, 18201123u, 3367710570u }, + { 437309679u, 2220769388u, 18201123u }, + { 1346863388u, 705296543u, 2220769388u } + }, + { + { 3310028953u, 1662315499u, 132645114u }, + { 2572908401u, 3105849797u, 1662315499u }, + { 1937586849u, 1735620028u, 3105849797u } + }, + { + { 461386353u, 1359675853u, 3599822966u }, + { 106675209u, 2044154050u, 1359675853u }, + { 1787730088u, 1149892630u, 2044154050u } + }, + { + { 3303902397u, 345146034u, 1417149696u }, + { 2231869247u, 1116882637u, 345146034u }, + { 1846832385u, 79626976u, 1116882637u } + }, + { + { 2765049417u, 3117782790u, 1805260159u }, + { 3796182890u, 1101141726u, 3117782790u }, + { 224270120u, 1004001443u, 1101141726u } + }, + { + { 89118668u, 2494198515u, 1356989069u }, + { 2490435731u, 997151755u, 2494198515u }, + { 1175528637u, 3444341166u, 997151755u } + }, + { + { 2340639019u, 510225634u, 286119182u }, + { 2045217287u, 1194574818u, 510225634u }, + { 2662281592u, 1728500627u, 1194574818u } + }, + { + { 210787847u, 1189120688u, 2848040407u }, + { 1087786165u, 2343328484u, 1189120688u }, + { 3465141330u, 2893041005u, 2343328484u } + }, + { + { 3438170226u, 3236285682u, 962036916u }, + { 2873263091u, 215280489u, 3236285682u }, + { 730413847u, 1474823842u, 215280489u } + }, + { + { 1566461658u, 133010024u, 2886695328u }, + { 2835827516u, 653809404u, 133010024u }, + { 3082882924u, 3710942807u, 653809404u } + }, + { + { 4201558916u, 1263786956u, 326001602u }, + { 762846463u, 621546357u, 1263786956u }, + { 2697142404u, 1156650856u, 621546357u } + }, + { + { 2655768102u, 2339029465u, 2430211448u }, + { 2669906627u, 403962847u, 2339029465u }, + { 1483118807u, 639660658u, 403962847u } + }, + { + { 3508595200u, 4228486662u, 754946994u }, + { 1913148390u, 3500531602u, 4228486662u }, + { 24637u, 3773159052u, 3500531602u } + }, + { + { 4024866227u, 1143874914u, 3205058469u }, + { 2970344133u, 2873927273u, 1143874914u }, + { 2167114735u, 4095476435u, 2873927273u } + }, + { + { 1479401095u, 2958366486u, 3027708794u }, + { 2704486034u, 3574053987u, 2958366486u }, + { 3630964515u, 1276667706u, 3574053987u } + }, + { + { 2035927380u, 1363628533u, 818363998u }, + { 3023327955u, 3968427114u, 1363628533u }, + { 1284825950u, 2871663372u, 3968427114u } + }, + { + { 3827747418u, 3897287251u, 4106993377u }, + { 1527779946u, 3221052941u, 3897287251u }, + { 4178727866u, 4281160673u, 3221052941u } + }, + { + { 
1174358892u, 2835476193u, 959978619u }, + { 850076464u, 3774782533u, 2835476193u }, + { 3880910680u, 3237990203u, 3774782533u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM1SeqHost[64][3][3] = { + { + { 2427906178u, 3580155704u, 949770784u }, + { 226153695u, 1230515664u, 3580155704u }, + { 1988835001u, 986791581u, 1230515664u } + }, + { + { 1774047142u, 3199155377u, 3106427820u }, + { 1901920839u, 4290900039u, 3199155377u }, + { 4178980191u, 280623348u, 4290900039u } + }, + { + { 3567524348u, 1934119675u, 3188270128u }, + { 2997767678u, 826363896u, 1934119675u }, + { 262952343u, 614326610u, 826363896u } + }, + { + { 1625613062u, 4288164505u, 2481284279u }, + { 4273461426u, 1177260757u, 4288164505u }, + { 305959988u, 4017252267u, 1177260757u } + }, + { + { 337929267u, 333342539u, 418300166u }, + { 2944208672u, 379097734u, 333342539u }, + { 2084056909u, 3625475947u, 379097734u } + }, + { + { 1189899255u, 1307754719u, 1214919992u }, + { 3736721708u, 3514751918u, 1307754719u }, + { 732435953u, 2021244538u, 3514751918u } + }, + { + { 4089172695u, 1533534334u, 525643282u }, + { 1497577018u, 1335684482u, 1533534334u }, + { 2079007086u, 3977541427u, 1335684482u } + }, + { + { 3075256652u, 2762754934u, 3846844247u }, + { 3057872364u, 3274545167u, 2762754934u }, + { 4028573983u, 938934351u, 3274545167u } + }, + { + { 2597859300u, 2880151048u, 2523330453u }, + { 1121709186u, 175667448u, 2880151048u }, + { 4182510911u, 1723133625u, 175667448u } + }, + { + { 484148868u, 1404283933u, 2982534313u }, + { 3736767353u, 3179865161u, 1404283933u }, + { 391120388u, 3758716888u, 3179865161u } + }, + { + { 2138867468u, 1128973399u, 2133702321u }, + { 1613561693u, 3622350766u, 1128973399u }, + { 1500151924u, 3759983985u, 3622350766u } + }, + { + { 3027706760u, 3786576552u, 2698781808u }, + { 2810527099u, 90498489u, 3786576552u }, + { 4220122612u, 1855245979u, 90498489u } + }, + { + { 3739389517u, 1110440720u, 917457922u }, + { 2163873618u, 3707591763u, 1110440720u }, + { 2667061910u, 2533383962u, 3707591763u } + }, + { + { 1545226000u, 1812182123u, 3693349190u }, + { 3422065122u, 3291428549u, 1812182123u }, + { 1193168720u, 2072837757u, 3291428549u } + }, + { + { 3230096243u, 2131723358u, 3262178024u }, + { 2882890127u, 4088518247u, 2131723358u }, + { 3991553306u, 1282224087u, 4088518247u } + }, + { + { 301207261u, 1722796810u, 3697719854u }, + { 3350228505u, 3410986694u, 1722796810u }, + { 3684514720u, 2846958957u, 3410986694u } + }, + { + { 1532963114u, 4236235786u, 3871128158u }, + { 3540401964u, 1285250577u, 4236235786u }, + { 1105070646u, 2764245175u, 1285250577u } + }, + { + { 210906218u, 3068599594u, 3034582784u }, + { 340633153u, 4004365908u, 3068599594u }, + { 4238928187u, 2299166464u, 4004365908u } + }, + { + { 2274701639u, 3955606166u, 3081246407u }, + { 3199954992u, 3948054919u, 3955606166u }, + { 2399101442u, 3438340286u, 3948054919u } + }, + { + { 504137100u, 1182303684u, 201533985u }, + { 4188299661u, 3042453580u, 1182303684u }, + { 2578519273u, 2674782930u, 3042453580u } + }, + { + { 1382964588u, 2578452047u, 3140440866u }, + { 261861891u, 1076783073u, 2578452047u }, + { 1634588989u, 164438428u, 1076783073u } + }, + { + { 2529186343u, 526867394u, 3102803247u }, + { 2687252475u, 2908898908u, 526867394u }, + { 1213100579u, 86050422u, 2908898908u } + }, + { + { 2690118316u, 538108523u, 790337895u }, + { 4193870709u, 1053552056u, 538108523u }, + { 1635227281u, 4002399925u, 1053552056u } + }, + { + { 2123712957u, 4205383007u, 1812304090u }, + { 
1095349745u, 166243972u, 4205383007u }, + { 428569070u, 2128782357u, 166243972u } + }, + { + { 1330151766u, 3569679412u, 4107175982u }, + { 3808641551u, 3621125056u, 3569679412u }, + { 4262164578u, 1927692878u, 3621125056u } + }, + { + { 3606295184u, 2442739556u, 3894922338u }, + { 1629626641u, 2729678535u, 2442739556u }, + { 3379124758u, 4279360935u, 2729678535u } + }, + { + { 1052092278u, 4249024666u, 919210106u }, + { 3253349463u, 3629539480u, 4249024666u }, + { 852514024u, 4025926501u, 3629539480u } + }, + { + { 12394571u, 1252747620u, 2133571953u }, + { 4227339509u, 3197545170u, 1252747620u }, + { 1884529704u, 1976203831u, 3197545170u } + }, + { + { 2986331025u, 2671019282u, 2847338542u }, + { 3173738401u, 3542657885u, 2671019282u }, + { 745203060u, 1546667401u, 3542657885u } + }, + { + { 2613012997u, 2311336951u, 2911336433u }, + { 1493974713u, 92565032u, 2311336951u }, + { 2786645250u, 257065974u, 92565032u } + }, + { + { 3424925004u, 2776053372u, 2204068573u }, + { 3770626858u, 2509257810u, 2776053372u }, + { 2979919489u, 1146336783u, 2509257810u } + }, + { + { 1474384834u, 827894421u, 515339473u }, + { 1373055755u, 1949809417u, 827894421u }, + { 3088339524u, 1194193824u, 1949809417u } + }, + { + { 1825805135u, 1289872272u, 3700877161u }, + { 3433422861u, 4062509844u, 1289872272u }, + { 3019008744u, 2060641859u, 4062509844u } + }, + { + { 3842597153u, 4253338264u, 3424495942u }, + { 698444416u, 60268595u, 4253338264u }, + { 4096010585u, 47309624u, 60268595u } + }, + { + { 2662288323u, 2043518992u, 1593435980u }, + { 1330201507u, 3618850300u, 2043518992u }, + { 2538793204u, 271787962u, 3618850300u } + }, + { + { 741020448u, 997594656u, 2398808739u }, + { 1160477043u, 1522130854u, 997594656u }, + { 3036916315u, 2847712653u, 1522130854u } + }, + { + { 2654964886u, 1889728930u, 53329096u }, + { 2042322941u, 1621136330u, 1889728930u }, + { 1553642730u, 784545882u, 1621136330u } + }, + { + { 1715219514u, 2831829177u, 929124824u }, + { 997274536u, 404228189u, 2831829177u }, + { 1386575385u, 4107238699u, 404228189u } + }, + { + { 3928131551u, 2912523524u, 1840499723u }, + { 4216003022u, 2970489088u, 2912523524u }, + { 1158689953u, 1425511081u, 2970489088u } + }, + { + { 2807004452u, 2510299562u, 271603006u }, + { 2505735035u, 2370490899u, 2510299562u }, + { 10873814u, 2450376936u, 2370490899u } + }, + { + { 2000734342u, 1113679064u, 2502160539u }, + { 1475266926u, 2787925323u, 1113679064u }, + { 1475797635u, 3044470744u, 2787925323u } + }, + { + { 1457157056u, 1252556678u, 3073232607u }, + { 1926798761u, 3639907189u, 1252556678u }, + { 2067740348u, 2256217204u, 3639907189u } + }, + { + { 3740999688u, 1035400458u, 3162437311u }, + { 4126312242u, 686702830u, 1035400458u }, + { 1699805291u, 667792040u, 686702830u } + }, + { + { 2422495016u, 3203768688u, 1858240466u }, + { 848719394u, 4092709154u, 3203768688u }, + { 659945473u, 1863075174u, 4092709154u } + }, + { + { 246817944u, 871751352u, 2834051003u }, + { 3976202597u, 3721214025u, 871751352u }, + { 783929942u, 745295675u, 3721214025u } + }, + { + { 3811740424u, 3603608092u, 2365398362u }, + { 3826150877u, 2906557036u, 3603608092u }, + { 2300510686u, 966815948u, 2906557036u } + }, + { + { 2816329160u, 18201123u, 3367710570u }, + { 437309679u, 2220769388u, 18201123u }, + { 1346863388u, 705296543u, 2220769388u } + }, + { + { 3310028953u, 1662315499u, 132645114u }, + { 2572908401u, 3105849797u, 1662315499u }, + { 1937586849u, 1735620028u, 3105849797u } + }, + { + { 461386353u, 1359675853u, 3599822966u }, + { 106675209u, 2044154050u, 
1359675853u }, + { 1787730088u, 1149892630u, 2044154050u } + }, + { + { 3303902397u, 345146034u, 1417149696u }, + { 2231869247u, 1116882637u, 345146034u }, + { 1846832385u, 79626976u, 1116882637u } + }, + { + { 2765049417u, 3117782790u, 1805260159u }, + { 3796182890u, 1101141726u, 3117782790u }, + { 224270120u, 1004001443u, 1101141726u } + }, + { + { 89118668u, 2494198515u, 1356989069u }, + { 2490435731u, 997151755u, 2494198515u }, + { 1175528637u, 3444341166u, 997151755u } + }, + { + { 2340639019u, 510225634u, 286119182u }, + { 2045217287u, 1194574818u, 510225634u }, + { 2662281592u, 1728500627u, 1194574818u } + }, + { + { 210787847u, 1189120688u, 2848040407u }, + { 1087786165u, 2343328484u, 1189120688u }, + { 3465141330u, 2893041005u, 2343328484u } + }, + { + { 3438170226u, 3236285682u, 962036916u }, + { 2873263091u, 215280489u, 3236285682u }, + { 730413847u, 1474823842u, 215280489u } + }, + { + { 1566461658u, 133010024u, 2886695328u }, + { 2835827516u, 653809404u, 133010024u }, + { 3082882924u, 3710942807u, 653809404u } + }, + { + { 4201558916u, 1263786956u, 326001602u }, + { 762846463u, 621546357u, 1263786956u }, + { 2697142404u, 1156650856u, 621546357u } + }, + { + { 2655768102u, 2339029465u, 2430211448u }, + { 2669906627u, 403962847u, 2339029465u }, + { 1483118807u, 639660658u, 403962847u } + }, + { + { 3508595200u, 4228486662u, 754946994u }, + { 1913148390u, 3500531602u, 4228486662u }, + { 24637u, 3773159052u, 3500531602u } + }, + { + { 4024866227u, 1143874914u, 3205058469u }, + { 2970344133u, 2873927273u, 1143874914u }, + { 2167114735u, 4095476435u, 2873927273u } + }, + { + { 1479401095u, 2958366486u, 3027708794u }, + { 2704486034u, 3574053987u, 2958366486u }, + { 3630964515u, 1276667706u, 3574053987u } + }, + { + { 2035927380u, 1363628533u, 818363998u }, + { 3023327955u, 3968427114u, 1363628533u }, + { 1284825950u, 2871663372u, 3968427114u } + }, + { + { 3827747418u, 3897287251u, 4106993377u }, + { 1527779946u, 3221052941u, 3897287251u }, + { 4178727866u, 4281160673u, 3221052941u } + }, + { + { 1174358892u, 2835476193u, 959978619u }, + { 850076464u, 3774782533u, 2835476193u }, + { 3880910680u, 3237990203u, 3774782533u } + } + }; +#endif // #ifndef __CUDACC_RTC__ + +CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS unsigned int mrg32k3aM2Seq[64][3][3] = { + { + { 1464411153u, 277697599u, 1610723613u }, + { 32183930u, 1464411153u, 1022607788u }, + { 2824425944u, 32183930u, 2093834863u } + }, + { + { 3492361727u, 1027004383u, 3167429889u }, + { 3674905362u, 3492361727u, 3572939265u }, + { 4270409313u, 3674905362u, 698814233u } + }, + { + { 880482061u, 205175925u, 4070445105u }, + { 2208329119u, 880482061u, 1933248566u }, + { 3741227945u, 2208329119u, 3962062826u } + }, + { + { 4184605179u, 1189429800u, 567967482u }, + { 107217966u, 4184605179u, 784865788u }, + { 549462420u, 107217966u, 3134382704u } + }, + { + { 2732536445u, 1231107067u, 3374588386u }, + { 409954030u, 2732536445u, 1044831206u }, + { 3398162498u, 409954030u, 3505648581u } + }, + { + { 2169560691u, 1076348534u, 637306236u }, + { 3704346564u, 2169560691u, 293694496u }, + { 632453145u, 3704346564u, 1609425246u } + }, + { + { 372115891u, 3928812480u, 2830541169u }, + { 3056527841u, 372115891u, 1924239834u }, + { 3044937468u, 3056527841u, 547142630u } + }, + { + { 1660852083u, 3635660815u, 1389092450u }, + { 1025573319u, 1660852083u, 3276803366u }, + { 4036331438u, 1025573319u, 4092197741u } + }, + { + { 1360732901u, 2887812973u, 4101068693u }, + { 52572783u, 1360732901u, 112458461u }, + { 2636566855u, 52572783u, 1136777988u } + 
}, + { + { 3455696508u, 536919193u, 3978804036u }, + { 3094157668u, 3455696508u, 3821833900u }, + { 2278849016u, 3094157668u, 2531965909u } + }, + { + { 2125991744u, 890897326u, 3790557569u }, + { 1433592392u, 2125991744u, 3671109604u }, + { 808215503u, 1433592392u, 2446306581u } + }, + { + { 3524411799u, 932865240u, 1838275365u }, + { 1789634890u, 3524411799u, 4130736474u }, + { 2252266098u, 1789634890u, 3048775967u } + }, + { + { 1773339925u, 948403862u, 1999624391u }, + { 983864203u, 1773339925u, 3734776305u }, + { 314407045u, 983864203u, 2648614071u } + }, + { + { 321802921u, 1099164995u, 2112167358u }, + { 3760936985u, 321802921u, 1003573324u }, + { 3758858458u, 3760936985u, 4014658840u } + }, + { + { 2196438580u, 805386227u, 4266375092u }, + { 4124675351u, 2196438580u, 2527961345u }, + { 94452540u, 4124675351u, 2825656399u } + }, + { + { 66735368u, 2228005807u, 4186703168u }, + { 2624855312u, 66735368u, 2708679078u }, + { 4098470056u, 2624855312u, 1773862183u } + }, + { + { 3072642883u, 2746897053u, 2690305546u }, + { 1105106652u, 3072642883u, 4047666135u }, + { 2862886282u, 1105106652u, 3597347398u } + }, + { + { 232906611u, 3873338256u, 4051554873u }, + { 3027413363u, 232906611u, 3159432673u }, + { 3872967050u, 3027413363u, 987156327u } + }, + { + { 1160686753u, 3676603152u, 1635979789u }, + { 1447386846u, 1160686753u, 2670438424u }, + { 816212890u, 1447386846u, 4288868534u } + }, + { + { 3825238244u, 1445162354u, 2362389441u }, + { 3440193648u, 3825238244u, 3520937545u }, + { 2652790808u, 3440193648u, 405299994u } + }, + { + { 1984094858u, 532165989u, 2027397575u }, + { 1455977136u, 1984094858u, 2433255524u }, + { 1039994763u, 1455977136u, 2069333087u } + }, + { + { 3680843319u, 2332949611u, 3516795313u }, + { 2033851810u, 3680843319u, 3843367307u }, + { 3686294589u, 2033851810u, 3912995069u } + }, + { + { 967423689u, 1724183394u, 635932799u }, + { 641380480u, 967423689u, 2145297779u }, + { 1723000412u, 641380480u, 455633660u } + }, + { + { 2130938335u, 1534972306u, 2511584766u }, + { 273828453u, 2130938335u, 3112810093u }, + { 4084843716u, 273828453u, 1399334152u } + }, + { + { 168278549u, 541167592u, 190177712u }, + { 403188859u, 168278549u, 2092073970u }, + { 58789558u, 403188859u, 2777887189u } + }, + { + { 634843389u, 4082275720u, 2092828966u }, + { 351187677u, 634843389u, 1312056270u }, + { 3347241070u, 351187677u, 2417192332u } + }, + { + { 443276110u, 1113643788u, 271102234u }, + { 3083745876u, 443276110u, 3370743767u }, + { 4200577503u, 3083745876u, 3298601960u } + }, + { + { 3533393557u, 764977733u, 3400275098u }, + { 144639933u, 3533393557u, 2646475951u }, + { 77963866u, 144639933u, 3794766611u } + }, + { + { 4064854722u, 1198665008u, 2872196602u }, + { 3274748603u, 4064854722u, 4164637970u }, + { 4238693771u, 3274748603u, 1981721347u } + }, + { + { 2279220396u, 2355957139u, 1417574285u }, + { 885864931u, 2279220396u, 1344421653u }, + { 1895527787u, 885864931u, 3726919367u } + }, + { + { 2898100178u, 2427331008u, 348923199u }, + { 3175444953u, 2898100178u, 4290541487u }, + { 246118669u, 3175444953u, 3410622769u } + }, + { + { 284442065u, 4064194676u, 2295560707u }, + { 4182706556u, 284442065u, 3696899246u }, + { 1201342255u, 4182706556u, 1145356382u } + }, + { + { 656615546u, 442908965u, 3724738272u }, + { 1624967553u, 656615546u, 798014134u }, + { 1157949454u, 1624967553u, 496247378u } + }, + { + { 265689579u, 675056541u, 3009083380u }, + { 3820679930u, 265689579u, 2961990151u }, + { 562287964u, 3820679930u, 1853486796u } + }, + { + { 1675739167u, 2319843005u, 
760605578u }, + { 4161492847u, 1675739167u, 226142150u }, + { 1017447188u, 4161492847u, 3431158427u } + }, + { + { 1759873736u, 2334568602u, 2154570180u }, + { 1812793060u, 1759873736u, 2111094408u }, + { 1168460586u, 1812793060u, 2495653141u } + }, + { + { 317621194u, 868104288u, 664971082u }, + { 2340275074u, 317621194u, 2168960688u }, + { 725706104u, 2340275074u, 3532023115u } + }, + { + { 3926931954u, 2907684453u, 615601328u }, + { 1132340715u, 3926931954u, 676995757u }, + { 1154819290u, 1132340715u, 1662727700u } + }, + { + { 3921782078u, 3376494857u, 2969567377u }, + { 475345024u, 3921782078u, 4206379953u }, + { 1795936544u, 475345024u, 934679595u } + }, + { + { 3119292228u, 741613041u, 2083352304u }, + { 1047885963u, 3119292228u, 1581078542u }, + { 1065969969u, 1047885963u, 661718928u } + }, + { + { 3643472111u, 2870554228u, 3995474529u }, + { 3804264051u, 3643472111u, 1366457944u }, + { 1246805564u, 3804264051u, 993186530u } + }, + { + { 796711791u, 3878204845u, 3160293932u }, + { 255632881u, 796711791u, 3778927111u }, + { 3472564181u, 255632881u, 388382377u } + }, + { + { 1776984101u, 1742284034u, 3449763933u }, + { 1349354417u, 1776984101u, 1264780832u }, + { 715722511u, 1349354417u, 1213319489u } + }, + { + { 4261866865u, 1914382786u, 201872335u }, + { 614207188u, 4261866865u, 1853554849u }, + { 2046042882u, 614207188u, 3193186353u } + }, + { + { 2210205512u, 2847073169u, 3324925707u }, + { 1251969297u, 2210205512u, 3491451503u }, + { 470400916u, 1251969297u, 2184392547u } + }, + { + { 1523590942u, 2391111113u, 68341529u }, + { 295466806u, 1523590942u, 4143310876u }, + { 3527253079u, 295466806u, 4059123142u } + }, + { + { 1406902110u, 3735012720u, 1774518130u }, + { 1814959027u, 1406902110u, 1560544267u }, + { 346472965u, 1814959027u, 964257199u } + }, + { + { 855309653u, 4208503105u, 1518467541u }, + { 2025248418u, 855309653u, 4148125749u }, + { 1349947330u, 2025248418u, 1168504873u } + }, + { + { 2375338156u, 3629519168u, 409696181u }, + { 252401654u, 2375338156u, 3992097193u }, + { 2793725401u, 252401654u, 1350184085u } + }, + { + { 873141039u, 3885583138u, 361604799u }, + { 3554143374u, 873141039u, 894746180u }, + { 1919765327u, 3554143374u, 876210854u } + }, + { + { 246368794u, 1703793169u, 2317362874u }, + { 2300930144u, 246368794u, 2560214589u }, + { 2016163623u, 2300930144u, 1504276775u } + }, + { + { 1574610921u, 2147546631u, 4103450226u }, + { 107416526u, 1574610921u, 1773803959u }, + { 1402542742u, 107416526u, 550063800u } + }, + { + { 363388665u, 592194244u, 1746615522u }, + { 2637234667u, 363388665u, 4031408742u }, + { 2895130475u, 2637234667u, 296510335u } + }, + { + { 3997368560u, 3047771871u, 3178383826u }, + { 1160174754u, 3997368560u, 4027094919u }, + { 1234984211u, 1160174754u, 4226264344u } + }, + { + { 3303179301u, 4243968063u, 3235964171u }, + { 1776841674u, 3303179301u, 2867287469u }, + { 1500495759u, 1776841674u, 1708226553u } + }, + { + { 1482944153u, 3192311574u, 354466071u }, + { 3932773012u, 1482944153u, 389193591u }, + { 3350181058u, 3932773012u, 3398059015u } + }, + { + { 640968550u, 3226860971u, 922372912u }, + { 1254989667u, 640968550u, 2383815228u }, + { 2027371896u, 1254989667u, 2925300409u } + }, + { + { 2313146046u, 3910187183u, 1377591475u }, + { 1689291784u, 2313146046u, 4255405993u }, + { 1650609719u, 1689291784u, 1897624297u } + }, + { + { 3656310954u, 882924050u, 2702189958u }, + { 3185020283u, 3656310954u, 1923190496u }, + { 2449669145u, 3185020283u, 4235849984u } + }, + { + { 377232416u, 1498446142u, 4229103619u }, + { 3926377906u, 
377232416u, 600268838u }, + { 511317726u, 3926377906u, 216160452u } + }, + { + { 1969399344u, 3273966859u, 4220943579u }, + { 3952111894u, 1969399344u, 575096961u }, + { 3815277103u, 3952111894u, 792177412u } + }, + { + { 2957238169u, 1410010554u, 1523740068u }, + { 3949237584u, 2957238169u, 74149658u }, + { 2564746147u, 3949237584u, 2557663578u } + }, + { + { 3377318569u, 1927835240u, 2556102508u }, + { 3022040116u, 3377318569u, 2549406364u }, + { 2387074241u, 3022040116u, 1477293711u } + }, + { + { 257306870u, 1748489735u, 547809226u }, + { 3708493374u, 257306870u, 4183546362u }, + { 4435502u, 3708493374u, 1607696753u } + } + }; + +#ifndef __CUDACC_RTC__ +CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS unsigned int mrg32k3aM2SeqHost[64][3][3] = { + { + { 1464411153u, 277697599u, 1610723613u }, + { 32183930u, 1464411153u, 1022607788u }, + { 2824425944u, 32183930u, 2093834863u } + }, + { + { 3492361727u, 1027004383u, 3167429889u }, + { 3674905362u, 3492361727u, 3572939265u }, + { 4270409313u, 3674905362u, 698814233u } + }, + { + { 880482061u, 205175925u, 4070445105u }, + { 2208329119u, 880482061u, 1933248566u }, + { 3741227945u, 2208329119u, 3962062826u } + }, + { + { 4184605179u, 1189429800u, 567967482u }, + { 107217966u, 4184605179u, 784865788u }, + { 549462420u, 107217966u, 3134382704u } + }, + { + { 2732536445u, 1231107067u, 3374588386u }, + { 409954030u, 2732536445u, 1044831206u }, + { 3398162498u, 409954030u, 3505648581u } + }, + { + { 2169560691u, 1076348534u, 637306236u }, + { 3704346564u, 2169560691u, 293694496u }, + { 632453145u, 3704346564u, 1609425246u } + }, + { + { 372115891u, 3928812480u, 2830541169u }, + { 3056527841u, 372115891u, 1924239834u }, + { 3044937468u, 3056527841u, 547142630u } + }, + { + { 1660852083u, 3635660815u, 1389092450u }, + { 1025573319u, 1660852083u, 3276803366u }, + { 4036331438u, 1025573319u, 4092197741u } + }, + { + { 1360732901u, 2887812973u, 4101068693u }, + { 52572783u, 1360732901u, 112458461u }, + { 2636566855u, 52572783u, 1136777988u } + }, + { + { 3455696508u, 536919193u, 3978804036u }, + { 3094157668u, 3455696508u, 3821833900u }, + { 2278849016u, 3094157668u, 2531965909u } + }, + { + { 2125991744u, 890897326u, 3790557569u }, + { 1433592392u, 2125991744u, 3671109604u }, + { 808215503u, 1433592392u, 2446306581u } + }, + { + { 3524411799u, 932865240u, 1838275365u }, + { 1789634890u, 3524411799u, 4130736474u }, + { 2252266098u, 1789634890u, 3048775967u } + }, + { + { 1773339925u, 948403862u, 1999624391u }, + { 983864203u, 1773339925u, 3734776305u }, + { 314407045u, 983864203u, 2648614071u } + }, + { + { 321802921u, 1099164995u, 2112167358u }, + { 3760936985u, 321802921u, 1003573324u }, + { 3758858458u, 3760936985u, 4014658840u } + }, + { + { 2196438580u, 805386227u, 4266375092u }, + { 4124675351u, 2196438580u, 2527961345u }, + { 94452540u, 4124675351u, 2825656399u } + }, + { + { 66735368u, 2228005807u, 4186703168u }, + { 2624855312u, 66735368u, 2708679078u }, + { 4098470056u, 2624855312u, 1773862183u } + }, + { + { 3072642883u, 2746897053u, 2690305546u }, + { 1105106652u, 3072642883u, 4047666135u }, + { 2862886282u, 1105106652u, 3597347398u } + }, + { + { 232906611u, 3873338256u, 4051554873u }, + { 3027413363u, 232906611u, 3159432673u }, + { 3872967050u, 3027413363u, 987156327u } + }, + { + { 1160686753u, 3676603152u, 1635979789u }, + { 1447386846u, 1160686753u, 2670438424u }, + { 816212890u, 1447386846u, 4288868534u } + }, + { + { 3825238244u, 1445162354u, 2362389441u }, + { 3440193648u, 3825238244u, 3520937545u }, + { 2652790808u, 3440193648u, 
405299994u } + }, + { + { 1984094858u, 532165989u, 2027397575u }, + { 1455977136u, 1984094858u, 2433255524u }, + { 1039994763u, 1455977136u, 2069333087u } + }, + { + { 3680843319u, 2332949611u, 3516795313u }, + { 2033851810u, 3680843319u, 3843367307u }, + { 3686294589u, 2033851810u, 3912995069u } + }, + { + { 967423689u, 1724183394u, 635932799u }, + { 641380480u, 967423689u, 2145297779u }, + { 1723000412u, 641380480u, 455633660u } + }, + { + { 2130938335u, 1534972306u, 2511584766u }, + { 273828453u, 2130938335u, 3112810093u }, + { 4084843716u, 273828453u, 1399334152u } + }, + { + { 168278549u, 541167592u, 190177712u }, + { 403188859u, 168278549u, 2092073970u }, + { 58789558u, 403188859u, 2777887189u } + }, + { + { 634843389u, 4082275720u, 2092828966u }, + { 351187677u, 634843389u, 1312056270u }, + { 3347241070u, 351187677u, 2417192332u } + }, + { + { 443276110u, 1113643788u, 271102234u }, + { 3083745876u, 443276110u, 3370743767u }, + { 4200577503u, 3083745876u, 3298601960u } + }, + { + { 3533393557u, 764977733u, 3400275098u }, + { 144639933u, 3533393557u, 2646475951u }, + { 77963866u, 144639933u, 3794766611u } + }, + { + { 4064854722u, 1198665008u, 2872196602u }, + { 3274748603u, 4064854722u, 4164637970u }, + { 4238693771u, 3274748603u, 1981721347u } + }, + { + { 2279220396u, 2355957139u, 1417574285u }, + { 885864931u, 2279220396u, 1344421653u }, + { 1895527787u, 885864931u, 3726919367u } + }, + { + { 2898100178u, 2427331008u, 348923199u }, + { 3175444953u, 2898100178u, 4290541487u }, + { 246118669u, 3175444953u, 3410622769u } + }, + { + { 284442065u, 4064194676u, 2295560707u }, + { 4182706556u, 284442065u, 3696899246u }, + { 1201342255u, 4182706556u, 1145356382u } + }, + { + { 656615546u, 442908965u, 3724738272u }, + { 1624967553u, 656615546u, 798014134u }, + { 1157949454u, 1624967553u, 496247378u } + }, + { + { 265689579u, 675056541u, 3009083380u }, + { 3820679930u, 265689579u, 2961990151u }, + { 562287964u, 3820679930u, 1853486796u } + }, + { + { 1675739167u, 2319843005u, 760605578u }, + { 4161492847u, 1675739167u, 226142150u }, + { 1017447188u, 4161492847u, 3431158427u } + }, + { + { 1759873736u, 2334568602u, 2154570180u }, + { 1812793060u, 1759873736u, 2111094408u }, + { 1168460586u, 1812793060u, 2495653141u } + }, + { + { 317621194u, 868104288u, 664971082u }, + { 2340275074u, 317621194u, 2168960688u }, + { 725706104u, 2340275074u, 3532023115u } + }, + { + { 3926931954u, 2907684453u, 615601328u }, + { 1132340715u, 3926931954u, 676995757u }, + { 1154819290u, 1132340715u, 1662727700u } + }, + { + { 3921782078u, 3376494857u, 2969567377u }, + { 475345024u, 3921782078u, 4206379953u }, + { 1795936544u, 475345024u, 934679595u } + }, + { + { 3119292228u, 741613041u, 2083352304u }, + { 1047885963u, 3119292228u, 1581078542u }, + { 1065969969u, 1047885963u, 661718928u } + }, + { + { 3643472111u, 2870554228u, 3995474529u }, + { 3804264051u, 3643472111u, 1366457944u }, + { 1246805564u, 3804264051u, 993186530u } + }, + { + { 796711791u, 3878204845u, 3160293932u }, + { 255632881u, 796711791u, 3778927111u }, + { 3472564181u, 255632881u, 388382377u } + }, + { + { 1776984101u, 1742284034u, 3449763933u }, + { 1349354417u, 1776984101u, 1264780832u }, + { 715722511u, 1349354417u, 1213319489u } + }, + { + { 4261866865u, 1914382786u, 201872335u }, + { 614207188u, 4261866865u, 1853554849u }, + { 2046042882u, 614207188u, 3193186353u } + }, + { + { 2210205512u, 2847073169u, 3324925707u }, + { 1251969297u, 2210205512u, 3491451503u }, + { 470400916u, 1251969297u, 2184392547u } + }, + { + { 1523590942u, 
2391111113u, 68341529u }, + { 295466806u, 1523590942u, 4143310876u }, + { 3527253079u, 295466806u, 4059123142u } + }, + { + { 1406902110u, 3735012720u, 1774518130u }, + { 1814959027u, 1406902110u, 1560544267u }, + { 346472965u, 1814959027u, 964257199u } + }, + { + { 855309653u, 4208503105u, 1518467541u }, + { 2025248418u, 855309653u, 4148125749u }, + { 1349947330u, 2025248418u, 1168504873u } + }, + { + { 2375338156u, 3629519168u, 409696181u }, + { 252401654u, 2375338156u, 3992097193u }, + { 2793725401u, 252401654u, 1350184085u } + }, + { + { 873141039u, 3885583138u, 361604799u }, + { 3554143374u, 873141039u, 894746180u }, + { 1919765327u, 3554143374u, 876210854u } + }, + { + { 246368794u, 1703793169u, 2317362874u }, + { 2300930144u, 246368794u, 2560214589u }, + { 2016163623u, 2300930144u, 1504276775u } + }, + { + { 1574610921u, 2147546631u, 4103450226u }, + { 107416526u, 1574610921u, 1773803959u }, + { 1402542742u, 107416526u, 550063800u } + }, + { + { 363388665u, 592194244u, 1746615522u }, + { 2637234667u, 363388665u, 4031408742u }, + { 2895130475u, 2637234667u, 296510335u } + }, + { + { 3997368560u, 3047771871u, 3178383826u }, + { 1160174754u, 3997368560u, 4027094919u }, + { 1234984211u, 1160174754u, 4226264344u } + }, + { + { 3303179301u, 4243968063u, 3235964171u }, + { 1776841674u, 3303179301u, 2867287469u }, + { 1500495759u, 1776841674u, 1708226553u } + }, + { + { 1482944153u, 3192311574u, 354466071u }, + { 3932773012u, 1482944153u, 389193591u }, + { 3350181058u, 3932773012u, 3398059015u } + }, + { + { 640968550u, 3226860971u, 922372912u }, + { 1254989667u, 640968550u, 2383815228u }, + { 2027371896u, 1254989667u, 2925300409u } + }, + { + { 2313146046u, 3910187183u, 1377591475u }, + { 1689291784u, 2313146046u, 4255405993u }, + { 1650609719u, 1689291784u, 1897624297u } + }, + { + { 3656310954u, 882924050u, 2702189958u }, + { 3185020283u, 3656310954u, 1923190496u }, + { 2449669145u, 3185020283u, 4235849984u } + }, + { + { 377232416u, 1498446142u, 4229103619u }, + { 3926377906u, 377232416u, 600268838u }, + { 511317726u, 3926377906u, 216160452u } + }, + { + { 1969399344u, 3273966859u, 4220943579u }, + { 3952111894u, 1969399344u, 575096961u }, + { 3815277103u, 3952111894u, 792177412u } + }, + { + { 2957238169u, 1410010554u, 1523740068u }, + { 3949237584u, 2957238169u, 74149658u }, + { 2564746147u, 3949237584u, 2557663578u } + }, + { + { 3377318569u, 1927835240u, 2556102508u }, + { 3022040116u, 3377318569u, 2549406364u }, + { 2387074241u, 3022040116u, 1477293711u } + }, + { + { 257306870u, 1748489735u, 547809226u }, + { 3708493374u, 257306870u, 4183546362u }, + { 4435502u, 3708493374u, 1607696753u } + } + }; +#endif // ifndef __CUDACC_RTC__ + +#ifdef CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS +#undef CURAND_MRG32K3A_MATRICES_DEVICE_QUALIFIERS +#endif + +#ifdef CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS +#undef CURAND_MRG32K3A_MATRICES_HOST_QUALIFIERS +#endif + +#endif /* !defined(CURAND_MRG32K3A_MATRICES_H_) */ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32.h new file mode 100644 index 0000000000000000000000000000000000000000..ab8a5a5b78629ba86dbb996a7561b415e90dfb53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32.h @@ -0,0 +1,210 @@ +/* + * Copyright 2010-2014 NVIDIA Corporation. All rights reserved. 
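The "Seq" tables above (mrg32k3aM1Seq / mrg32k3aM1SeqHost and mrg32k3aM2Seq / mrg32k3aM2SeqHost) each hold 64 precomputed 3x3 matrices, one per power of two, that advance the two halves of an MRG32k3A state modulo the generator's moduli (m1 = 2^32 - 209, m2 = 2^32 - 22853). The sketch below shows the modular matrix-vector product such a table entry is applied with; mat_vec_mod, the main driver, and the local copy of mrg32k3aM1SeqHost[0] are illustrative assumptions for this sketch, not cuRAND's actual skipahead implementation.

/* Illustrative only: applies one precomputed skip matrix to a state half.
 * MRG32K3A_M1 is the standard MRG32k3a modulus; mat_vec_mod, main and the
 * local copy of mrg32k3aM1SeqHost[0] are assumptions for this sketch. */
#include <stdio.h>

#define MRG32K3A_M1 4294967087ULL            /* m1 = 2^32 - 209 */

/* y = (A * x) mod m.  Every matrix entry and state component is < 2^32,
 * so each 64-bit product fits and can be reduced before it is accumulated. */
static void mat_vec_mod(const unsigned int A[3][3],
                        const unsigned long long x[3],
                        unsigned long long y[3],
                        unsigned long long m)
{
    for (int i = 0; i < 3; ++i) {
        unsigned long long acc = 0;
        for (int j = 0; j < 3; ++j) {
            unsigned long long p = ((unsigned long long)A[i][j] * x[j]) % m;
            acc = (acc + p) % m;
        }
        y[i] = acc;
    }
}

int main(void)
{
    /* Values copied from mrg32k3aM1SeqHost[0] above. */
    const unsigned int m1seq0[3][3] = {
        { 2427906178u, 3580155704u,  949770784u },
        {  226153695u, 1230515664u, 3580155704u },
        { 1988835001u,  986791581u, 1230515664u }
    };
    /* A made-up state half; each component must be < m1. */
    unsigned long long s1[3] = { 12345, 12345, 12345 }, skipped[3];

    mat_vec_mod(m1seq0, s1, skipped, MRG32K3A_M1);
    printf("%llu %llu %llu\n", skipped[0], skipped[1], skipped[2]);
    return 0;
}

Applying the k-th stored matrix jumps that half of the state ahead by a fixed power-of-two number of sequences, so an arbitrary skip distance can be handled by combining the matrices selected by its binary digits.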
+ * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +#ifndef CURAND_MTGP32_H +#define CURAND_MTGP32_H +/* + * @file curand_mtgp32.h + * + * @brief Mersenne Twister for Graphic Processors (mtgp32), which + * generates 32-bit unsigned integers and single precision floating + * point numbers based on IEEE 754 format. + * + * @author Mutsuo Saito (Hiroshima University) + * @author Makoto Matsumoto (Hiroshima University) + * + */ +/* + * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + * University and University of Tokyo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#define MTGPDC_MEXP 11213 +#define MTGPDC_N 351 +#define MTGPDC_FLOOR_2P 256 +#define MTGPDC_CEIL_2P 512 +#define MTGPDC_PARAM_TABLE mtgp32dc_params_fast_11213 +#define MTGP32_STATE_SIZE 1024 +#define MTGP32_STATE_MASK 1023 +#define CURAND_NUM_MTGP32_PARAMS 200 +#define MEXP 11213 +#define THREAD_NUM MTGPDC_FLOOR_2P +#define LARGE_SIZE (THREAD_NUM * 3) +#define TBL_SIZE 16 + +/** + * \addtogroup DEVICE Device API + * + * @{ + */ + +/* + * \struct MTGP32_PARAMS_FAST_T + * MTGP32 parameters. + * Some element is redundant to keep structure simple. + * + * \b pos is a pick up position which is selected to have good + * performance on graphic processors. 3 < \b pos < Q, where Q is a + * maximum number such that the size of status array - Q is a power of + * 2. For example, when \b mexp is 44497, size of 32-bit status array + * is 696, and Q is 184, then \b pos is between 4 and 183. This means + * 512 parallel calculations is allowed when \b mexp is 44497. + * + * \b poly_sha1 is SHA1 digest of the characteristic polynomial of + * state transition function. SHA1 is calculated based on printing + * form of the polynomial. This is important when we use parameters + * generated by the dynamic creator which + * + * \b mask This is a mask to make the dimension of state space have + * just Mersenne Prime. This is redundant. + */ + +struct mtgp32_params_fast; + +struct mtgp32_params_fast { + int mexp; /*< Mersenne exponent. This is redundant. */ + int pos; /*< pick up position. */ + int sh1; /*< shift value 1. 0 < sh1 < 32. */ + int sh2; /*< shift value 2. 0 < sh2 < 32. */ + unsigned int tbl[16]; /*< a small matrix. */ + unsigned int tmp_tbl[16]; /*< a small matrix for tempering. */ + unsigned int flt_tmp_tbl[16]; /*< a small matrix for tempering and + converting to float. */ + unsigned int mask; /*< This is a mask for state space */ + unsigned char poly_sha1[21]; /*< SHA1 digest */ +}; + +/** \cond UNHIDE_TYPEDEFS */ +typedef struct mtgp32_params_fast mtgp32_params_fast_t; +/** \endcond */ + +/* + * Generator Parameters. 
+ */ +struct mtgp32_kernel_params; +struct mtgp32_kernel_params { + unsigned int pos_tbl[CURAND_NUM_MTGP32_PARAMS]; + unsigned int param_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE]; + unsigned int temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE]; + unsigned int single_temper_tbl[CURAND_NUM_MTGP32_PARAMS][TBL_SIZE]; + unsigned int sh1_tbl[CURAND_NUM_MTGP32_PARAMS]; + unsigned int sh2_tbl[CURAND_NUM_MTGP32_PARAMS]; + unsigned int mask[1]; +}; + +/** \cond UNHIDE_TYPEDEFS */ +typedef struct mtgp32_kernel_params mtgp32_kernel_params_t; +/** \endcond */ + + + +/* + * kernel I/O + * This structure must be initialized before first use. + */ + +/* MTGP (Mersenne Twister) RNG */ +/* This generator uses the Mersenne Twister algorithm of + * http://arxiv.org/abs/1005.4973v2 + * Has period 2^11213. +*/ + +/** + * CURAND MTGP32 state + */ +struct curandStateMtgp32; + +struct curandStateMtgp32 { + unsigned int s[MTGP32_STATE_SIZE]; + int offset; + int pIdx; + mtgp32_kernel_params_t * k; +}; + +/* + * CURAND MTGP32 state + */ +/** \cond UNHIDE_TYPEDEFS */ +typedef struct curandStateMtgp32 curandStateMtgp32_t; +/** \endcond */ + +/** @} */ + +#endif + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h new file mode 100644 index 0000000000000000000000000000000000000000..56a8994c7f2016e4cd2bed77cf6b6516a42f26be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_mtgp32dc_p_11213.h @@ -0,0 +1,11710 @@ +/* + * Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 
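The mtgp32_kernel_params and curandStateMtgp32 structures declared in curand_mtgp32.h are the device-side containers that cuRAND's host helpers fill from a pre-generated parameter table such as mtgp32dc_params_fast_11213, defined in the header that follows. A minimal usage sketch under those assumptions is shown below; it relies on the standard cuRAND host API (curandMakeMTGP32Constants, curandMakeMTGP32KernelState) and the device-side curand() call, while the kernel body, NUM_BLOCKS, the seed value, and the omission of error checking are illustrative.

/* Illustrative only: seeds NUM_BLOCKS MTGP32 states from the pre-generated
 * parameter table and draws one 32-bit value per thread.  NUM_BLOCKS, the
 * seed and the absence of error checking are assumptions for this sketch. */
#include <cstdio>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
#include <curand_mtgp32dc_p_11213.h>

#define NUM_BLOCKS 64   /* must not exceed CURAND_NUM_MTGP32_PARAMS (200) */

__global__ void draw(curandStateMtgp32_t *state, unsigned int *out)
{
    /* One MTGP32 state per block, shared by up to 256 threads. */
    out[blockIdx.x * blockDim.x + threadIdx.x] = curand(&state[blockIdx.x]);
}

int main()
{
    curandStateMtgp32_t    *devStates;
    mtgp32_kernel_params_t *devParams;
    unsigned int           *devOut;

    cudaMalloc(&devStates, NUM_BLOCKS * sizeof(curandStateMtgp32_t));
    cudaMalloc(&devParams, sizeof(mtgp32_kernel_params_t));
    cudaMalloc(&devOut,    NUM_BLOCKS * 256 * sizeof(unsigned int));

    /* Copy the pre-generated parameter sets into the device-side
     * mtgp32_kernel_params structure, then seed one state per block. */
    curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, devParams);
    curandMakeMTGP32KernelState(devStates, mtgp32dc_params_fast_11213,
                                devParams, NUM_BLOCKS, 1234ULL);

    draw<<<NUM_BLOCKS, 256>>>(devStates, devOut);
    cudaDeviceSynchronize();

    cudaFree(devOut);
    cudaFree(devParams);
    cudaFree(devStates);
    return 0;
}

Each block shares a single curandStateMtgp32 (hence the blockIdx.x indexing), and the constants above bound the setup: at most CURAND_NUM_MTGP32_PARAMS (200) states and at most THREAD_NUM (256) threads per block.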
2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + +/* + * Multiple sets of generator parameters for Mersenne Twister + * with period 2**11213 -1 + */ +/* + * Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + * University. All rights reserved. + * Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + * University and University of Tokyo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the Hiroshima University nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#if !defined CURAND_MTGP32DC_P_11213_H +#define CURAND_MTGP32DC_P_11213_H + +#include "curand_mtgp32.h" + + +#if (__cplusplus >= 201703L) && defined(__cpp_inline_variables) +inline const int mtgpdc_params_11213_num = 200; +#else +static const int mtgpdc_params_11213_num = 200; +#endif + +#if (__cplusplus >= 201703L) && defined(__cpp_inline_variables) +inline mtgp32_params_fast_t mtgp32dc_params_fast_11213[] +#else +static mtgp32_params_fast_t mtgp32dc_params_fast_11213[] +#endif + = { + { + /* No.0 delta:1599 weight:665 */ + 11213, + 88, + 19, + 5, + {(0x00000000), + (0xaba4d62c), + (0xbb076f87), + (0x10a3b9ab), + (0x22000000), + (0x89a4d62c), + (0x99076f87), + (0x32a3b9ab), + (0x000095ba), + (0xaba44396), + (0xbb07fa3d), + (0x10a32c11), + (0x220095ba), + (0x89a44396), + (0x9907fa3d), + (0x32a32c11)}, + {(0x00000000), + (0x06100000), + (0x25d80000), + (0x23c80000), + (0x282c0000), + (0x2e3c0000), + (0x0df40000), + (0x0be40000), + (0x3302de00), + (0x3512de00), + (0x16dade00), + (0x10cade00), + (0x1b2ede00), + (0x1d3ede00), + (0x3ef6de00), + (0x38e6de00)}, + {(0x3f800000), + (0x3f830800), + (0x3f92ec00), + (0x3f91e400), + (0x3f941600), + (0x3f971e00), + (0x3f86fa00), + (0x3f85f200), + (0x3f99816f), + (0x3f9a896f), + (0x3f8b6d6f), + (0x3f88656f), + (0x3f8d976f), + (0x3f8e9f6f), + (0x3f9f7b6f), + (0x3f9c736f)}, + (0xfff80000), + {0xcb,0xb0,0x3f,0xaa,0x65,0x0d,0xbd,0x1c,0x8c,0xc2, + 0x91,0x00,0x87,0x25,0x7f,0xf8,0x6f,0x23,0xf2,0x18,0x00} + }, + { + /* No.1 delta:1185 weight:1129 */ + 11213, + 84, + 15, + 12, + {(0x00000000), + (0x9a975707), + (0x75c21b11), + (0xef554c16), + (0x15500019), + (0x8fc7571e), + (0x60921b08), + (0xfa054c0f), + (0x0000d269), + (0x9a97856e), + (0x75c2c978), + (0xef559e7f), + (0x1550d270), + (0x8fc78577), + (0x6092c961), + (0xfa059e66)}, + {(0x00000000), + (0x00646c00), + (0x0810f000), + (0x08749c00), + (0x37028000), + (0x3766ec00), + (0x3f127000), + (0x3f761c00), + (0x00505e00), + (0x00343200), + (0x0840ae00), + (0x0824c200), + (0x3752de00), + (0x3736b200), + (0x3f422e00), + (0x3f264200)}, + {(0x3f800000), + (0x3f803236), + (0x3f840878), + (0x3f843a4e), + (0x3f9b8140), + (0x3f9bb376), + (0x3f9f8938), + (0x3f9fbb0e), + (0x3f80282f), + (0x3f801a19), + (0x3f842057), + (0x3f841261), + (0x3f9ba96f), + (0x3f9b9b59), + (0x3f9fa117), + (0x3f9f9321)}, + (0xfff80000), + {0x53,0xe0,0x89,0xe0,0x47,0x42,0x00,0xbd,0xe9,0x04, + 0xc9,0x11,0x5c,0xe3,0x97,0x94,0x94,0xd8,0xbb,0xfc,0x00} + }, + { + /* No.2 delta:2848 weight:755 */ + 11213, + 25, + 4, + 18, + {(0x00000000), + (0x507cbad4), + (0xe742103b), + (0xb73eaaef), + (0x71c00027), + (0x21bcbaf3), + (0x9682101c), + (0xc6feaac8), + (0x0000dd6c), + (0x507c67b8), + (0xe742cd57), + (0xb73e7783), + (0x71c0dd4b), + (0x21bc679f), + (0x9682cd70), + (0xc6fe77a4)}, + {(0x00000000), + (0x04210200), + (0x08040000), + (0x0c250200), + (0x11400000), + (0x15610200), + (0x19440000), + (0x1d650200), + (0x206f1e00), + (0x244e1c00), + (0x286b1e00), + (0x2c4a1c00), + (0x312f1e00), + (0x350e1c00), + (0x392b1e00), + (0x3d0a1c00)}, + {(0x3f800000), + (0x3f821081), + (0x3f840200), + (0x3f861281), + (0x3f88a000), + (0x3f8ab081), + (0x3f8ca200), + (0x3f8eb281), + (0x3f90378f), + (0x3f92270e), + (0x3f94358f), + (0x3f96250e), + (0x3f98978f), + (0x3f9a870e), + (0x3f9c958f), + (0x3f9e850e)}, + (0xfff80000), + {0x9e,0x64,0xa3,0x28,0x10,0x66,0x35,0x72,0xe3,0x07, + 0xf7,0xcf,0x5a,0xe3,0x57,0x6f,0x90,0x0c,0x28,0x79,0x00} + }, + { + /* No.3 delta:1100 weight:1261 */ + 11213, + 42, + 20, + 9, + {(0x00000000), + (0x3ef0ef20), + (0x9fddd9ea), + (0xa12d36ca), + 
(0x10300036), + (0x2ec0ef16), + (0x8fedd9dc), + (0xb11d36fc), + (0x00005ced), + (0x3ef0b3cd), + (0x9fdd8507), + (0xa12d6a27), + (0x10305cdb), + (0x2ec0b3fb), + (0x8fed8531), + (0xb11d6a11)}, + {(0x00000000), + (0x24470c00), + (0x00268200), + (0x24618e00), + (0x00530000), + (0x24140c00), + (0x00758200), + (0x24328e00), + (0x105c5e00), + (0x341b5200), + (0x107adc00), + (0x343dd000), + (0x100f5e00), + (0x34485200), + (0x1029dc00), + (0x346ed000)}, + {(0x3f800000), + (0x3f922386), + (0x3f801341), + (0x3f9230c7), + (0x3f802980), + (0x3f920a06), + (0x3f803ac1), + (0x3f921947), + (0x3f882e2f), + (0x3f9a0da9), + (0x3f883d6e), + (0x3f9a1ee8), + (0x3f8807af), + (0x3f9a2429), + (0x3f8814ee), + (0x3f9a3768)}, + (0xfff80000), + {0x9b,0x1d,0x8b,0x59,0x56,0x8e,0x7b,0x75,0x24,0x4a, + 0xaf,0x49,0x6e,0xcf,0xcb,0x5d,0x67,0x00,0x31,0xac,0x00} + }, + { + /* No.4 delta:5389 weight:1871 */ + 11213, + 22, + 1, + 5, + {(0x00000000), + (0xb004feb9), + (0x03b7c894), + (0xb3b3362d), + (0x0750004d), + (0xb754fef4), + (0x04e7c8d9), + (0xb4e33660), + (0x000049e4), + (0xb004b75d), + (0x03b78170), + (0xb3b37fc9), + (0x075049a9), + (0xb754b710), + (0x04e7813d), + (0xb4e37f84)}, + {(0x00000000), + (0x48698800), + (0x04010000), + (0x4c688800), + (0x60340000), + (0x285d8800), + (0x64350000), + (0x2c5c8800), + (0x20065e00), + (0x686fd600), + (0x24075e00), + (0x6c6ed600), + (0x40325e00), + (0x085bd600), + (0x44335e00), + (0x0c5ad600)}, + {(0x3f800000), + (0x3fa434c4), + (0x3f820080), + (0x3fa63444), + (0x3fb01a00), + (0x3f942ec4), + (0x3fb21a80), + (0x3f962e44), + (0x3f90032f), + (0x3fb437eb), + (0x3f9203af), + (0x3fb6376b), + (0x3fa0192f), + (0x3f842deb), + (0x3fa219af), + (0x3f862d6b)}, + (0xfff80000), + {0xbb,0x80,0x47,0x21,0x2c,0xbd,0xd4,0x7b,0xf3,0xb3, + 0xbd,0x4d,0x05,0x36,0xf5,0x4d,0xe7,0x89,0xdc,0x11,0x00} + }, + { + /* No.5 delta:2280 weight:1243 */ + 11213, + 11, + 16, + 1, + {(0x00000000), + (0x646a361c), + (0xe36e4823), + (0x87047e3f), + (0x2b20005f), + (0x4f4a3643), + (0xc84e487c), + (0xac247e60), + (0x0000caac), + (0x646afcb0), + (0xe36e828f), + (0x8704b493), + (0x2b20caf3), + (0x4f4afcef), + (0xc84e82d0), + (0xac24b4cc)}, + {(0x00000000), + (0xd49c4200), + (0x0b98f000), + (0xdf04b200), + (0x24007200), + (0xf09c3000), + (0x2f988200), + (0xfb04c000), + (0x5d311e00), + (0x89ad5c00), + (0x56a9ee00), + (0x8235ac00), + (0x79316c00), + (0xadad2e00), + (0x72a99c00), + (0xa635de00)}, + {(0x3f800000), + (0x3fea4e21), + (0x3f85cc78), + (0x3fef8259), + (0x3f920039), + (0x3ff84e18), + (0x3f97cc41), + (0x3ffd8260), + (0x3fae988f), + (0x3fc4d6ae), + (0x3fab54f7), + (0x3fc11ad6), + (0x3fbc98b6), + (0x3fd6d697), + (0x3fb954ce), + (0x3fd31aef)}, + (0xfff80000), + {0x24,0x94,0x81,0x62,0xb4,0x6f,0x10,0x4e,0x66,0xd1, + 0xb4,0xdd,0xa0,0x92,0x2e,0xdf,0x8d,0x6b,0x03,0xa3,0x00} + }, + { + /* No.6 delta:722 weight:1357 */ + 11213, + 76, + 16, + 6, + {(0x00000000), + (0x1bb96b3c), + (0x7c6323a4), + (0x67da4898), + (0x22400061), + (0x39f96b5d), + (0x5e2323c5), + (0x459a48f9), + (0x00004c61), + (0x1bb9275d), + (0x7c636fc5), + (0x67da04f9), + (0x22404c00), + (0x39f9273c), + (0x5e236fa4), + (0x459a0498)}, + {(0x00000000), + (0x08258800), + (0x1042c000), + (0x18674800), + (0x00203000), + (0x0805b800), + (0x1062f000), + (0x18477800), + (0x70c01e00), + (0x78e59600), + (0x6082de00), + (0x68a75600), + (0x70e02e00), + (0x78c5a600), + (0x60a2ee00), + (0x68876600)}, + {(0x3f800000), + (0x3f8412c4), + (0x3f882160), + (0x3f8c33a4), + (0x3f801018), + (0x3f8402dc), + (0x3f883178), + (0x3f8c23bc), + (0x3fb8600f), + (0x3fbc72cb), + (0x3fb0416f), + 
(0x3fb453ab), + (0x3fb87017), + (0x3fbc62d3), + (0x3fb05177), + (0x3fb443b3)}, + (0xfff80000), + {0x06,0x1a,0x0a,0x3e,0x05,0xb0,0xd2,0xab,0x76,0xef, + 0xe5,0xf6,0xe4,0x09,0x02,0x41,0x4b,0x21,0xef,0x76,0x00} + }, + { + /* No.7 delta:3952 weight:825 */ + 11213, + 11, + 15, + 16, + {(0x00000000), + (0xa49b7ddb), + (0xbc75e45f), + (0x18ee9984), + (0x75600076), + (0xd1fb7dad), + (0xc915e429), + (0x6d8e99f2), + (0x0000c4f9), + (0xa49bb922), + (0xbc7520a6), + (0x18ee5d7d), + (0x7560c48f), + (0xd1fbb954), + (0xc91520d0), + (0x6d8e5d0b)}, + {(0x00000000), + (0xf0015000), + (0x0a4c2c00), + (0xfa4d7c00), + (0x0482b000), + (0xf483e000), + (0x0ece9c00), + (0xfecfcc00), + (0x45105e00), + (0xb5110e00), + (0x4f5c7200), + (0xbf5d2200), + (0x4192ee00), + (0xb193be00), + (0x4bdec200), + (0xbbdf9200)}, + {(0x3f800000), + (0x3ff800a8), + (0x3f852616), + (0x3ffd26be), + (0x3f824158), + (0x3ffa41f0), + (0x3f87674e), + (0x3fff67e6), + (0x3fa2882f), + (0x3fda8887), + (0x3fa7ae39), + (0x3fdfae91), + (0x3fa0c977), + (0x3fd8c9df), + (0x3fa5ef61), + (0x3fddefc9)}, + (0xfff80000), + {0xa9,0x6f,0x34,0xf9,0xdd,0xff,0x0a,0x48,0x3c,0x3f, + 0x3e,0x6f,0x74,0x70,0x37,0x33,0x19,0xe4,0x24,0x9b,0x00} + }, + { + /* No.8 delta:4274 weight:1347 */ + 11213, + 42, + 6, + 11, + {(0x00000000), + (0x311b3c94), + (0xa1e0c93f), + (0x90fbf5ab), + (0x3c600082), + (0x0d7b3c16), + (0x9d80c9bd), + (0xac9bf529), + (0x0000b622), + (0x311b8ab6), + (0xa1e07f1d), + (0x90fb4389), + (0x3c60b6a0), + (0x0d7b8a34), + (0x9d807f9f), + (0xac9b430b)}, + {(0x00000000), + (0x80000000), + (0x00000000), + (0x80000000), + (0xc0000000), + (0x40000000), + (0xc0000000), + (0x40000000), + (0x20001e00), + (0xa0001e00), + (0x20001e00), + (0xa0001e00), + (0xe0001e00), + (0x60001e00), + (0xe0001e00), + (0x60001e00)}, + {(0x3f800000), + (0x3fc00000), + (0x3f800000), + (0x3fc00000), + (0x3fe00000), + (0x3fa00000), + (0x3fe00000), + (0x3fa00000), + (0x3f90000f), + (0x3fd0000f), + (0x3f90000f), + (0x3fd0000f), + (0x3ff0000f), + (0x3fb0000f), + (0x3ff0000f), + (0x3fb0000f)}, + (0xfff80000), + {0x3c,0x5a,0x73,0x0e,0xab,0x68,0x32,0x57,0x6f,0xe6, + 0x1d,0x93,0x6e,0xbd,0xef,0x1a,0x6f,0x25,0x1f,0x70,0x00} + }, + { + /* No.9 delta:1902 weight:1001 */ + 11213, + 60, + 6, + 11, + {(0x00000000), + (0xbc8ed6c0), + (0x4d9fbb3d), + (0xf1116dfd), + (0x8e70009d), + (0x32fed65d), + (0xc3efbba0), + (0x7f616d60), + (0x0000a00a), + (0xbc8e76ca), + (0x4d9f1b37), + (0xf111cdf7), + (0x8e70a097), + (0x32fe7657), + (0xc3ef1baa), + (0x7f61cd6a)}, + {(0x00000000), + (0xc0400400), + (0x00220000), + (0xc0620400), + (0x303e0000), + (0xf07e0400), + (0x301c0000), + (0xf05c0400), + (0x40021e00), + (0x80421a00), + (0x40201e00), + (0x80601a00), + (0x703c1e00), + (0xb07c1a00), + (0x701e1e00), + (0xb05e1a00)}, + {(0x3f800000), + (0x3fe02002), + (0x3f801100), + (0x3fe03102), + (0x3f981f00), + (0x3ff83f02), + (0x3f980e00), + (0x3ff82e02), + (0x3fa0010f), + (0x3fc0210d), + (0x3fa0100f), + (0x3fc0300d), + (0x3fb81e0f), + (0x3fd83e0d), + (0x3fb80f0f), + (0x3fd82f0d)}, + (0xfff80000), + {0x83,0x62,0x6c,0x11,0xe4,0x19,0x2b,0x9b,0x87,0x4d, + 0x8e,0x68,0xde,0x69,0x56,0xc5,0x44,0x33,0x4f,0x20,0x00} + }, + { + /* No.10 delta:1148 weight:927 */ + 11213, + 45, + 12, + 13, + {(0x00000000), + (0x06bc3338), + (0xdde8b9f9), + (0xdb548ac1), + (0xfb2000a5), + (0xfd9c339d), + (0x26c8b95c), + (0x20748a64), + (0x0000a4ec), + (0x06bc97d4), + (0xdde81d15), + (0xdb542e2d), + (0xfb20a449), + (0xfd9c9771), + (0x26c81db0), + (0x20742e88)}, + {(0x00000000), + (0x0c494c00), + (0x42040c00), + (0x4e4d4000), + (0x20090800), + (0x2c404400), + 
(0x620d0400), + (0x6e444800), + (0x10409e00), + (0x1c09d200), + (0x52449200), + (0x5e0dde00), + (0x30499600), + (0x3c00da00), + (0x724d9a00), + (0x7e04d600)}, + {(0x3f800000), + (0x3f8624a6), + (0x3fa10206), + (0x3fa726a0), + (0x3f900484), + (0x3f962022), + (0x3fb10682), + (0x3fb72224), + (0x3f88204f), + (0x3f8e04e9), + (0x3fa92249), + (0x3faf06ef), + (0x3f9824cb), + (0x3f9e006d), + (0x3fb926cd), + (0x3fbf026b)}, + (0xfff80000), + {0xf8,0x39,0x8a,0x36,0x0f,0x90,0x4f,0x3c,0x04,0x0c, + 0x0a,0x02,0xd4,0x04,0x1b,0xc9,0xc4,0x8e,0x15,0x55,0x00} + }, + { + /* No.11 delta:2163 weight:1669 */ + 11213, + 80, + 6, + 9, + {(0x00000000), + (0x247f444d), + (0xb70a5ab0), + (0x93751efd), + (0xc44000b5), + (0xe03f44f8), + (0x734a5a05), + (0x57351e48), + (0x0000554f), + (0x247f1102), + (0xb70a0fff), + (0x93754bb2), + (0xc44055fa), + (0xe03f11b7), + (0x734a0f4a), + (0x57354b07)}, + {(0x00000000), + (0x30200a00), + (0x106c0000), + (0x204c0a00), + (0x00180400), + (0x30380e00), + (0x10740400), + (0x20540e00), + (0x100a1e00), + (0x202a1400), + (0x00661e00), + (0x30461400), + (0x10121a00), + (0x20321000), + (0x007e1a00), + (0x305e1000)}, + {(0x3f800000), + (0x3f981005), + (0x3f883600), + (0x3f902605), + (0x3f800c02), + (0x3f981c07), + (0x3f883a02), + (0x3f902a07), + (0x3f88050f), + (0x3f90150a), + (0x3f80330f), + (0x3f98230a), + (0x3f88090d), + (0x3f901908), + (0x3f803f0d), + (0x3f982f08)}, + (0xfff80000), + {0x98,0xb0,0x30,0x32,0xcf,0x75,0x63,0xeb,0x3a,0xb6, + 0x4f,0x70,0x47,0x4e,0x6a,0xd1,0x7b,0x40,0xbd,0x90,0x00} + }, + { + /* No.12 delta:4985 weight:689 */ + 11213, + 81, + 8, + 18, + {(0x00000000), + (0xcdcab880), + (0xdedf1226), + (0x1315aaa6), + (0xd17000c6), + (0x1cbab846), + (0x0faf12e0), + (0xc265aa60), + (0x0000b647), + (0xcdca0ec7), + (0xdedfa461), + (0x13151ce1), + (0xd170b681), + (0x1cba0e01), + (0x0fafa4a7), + (0xc2651c27)}, + {(0x00000000), + (0x601c0000), + (0x08780000), + (0x68640000), + (0x00740000), + (0x60680000), + (0x080c0000), + (0x68100000), + (0x00a01e00), + (0x60bc1e00), + (0x08d81e00), + (0x68c41e00), + (0x00d41e00), + (0x60c81e00), + (0x08ac1e00), + (0x68b01e00)}, + {(0x3f800000), + (0x3fb00e00), + (0x3f843c00), + (0x3fb43200), + (0x3f803a00), + (0x3fb03400), + (0x3f840600), + (0x3fb40800), + (0x3f80500f), + (0x3fb05e0f), + (0x3f846c0f), + (0x3fb4620f), + (0x3f806a0f), + (0x3fb0640f), + (0x3f84560f), + (0x3fb4580f)}, + (0xfff80000), + {0x90,0xf2,0x51,0xc2,0x2d,0xc4,0x52,0x73,0x15,0xfa, + 0xeb,0xc3,0x64,0x51,0x48,0xf7,0xb1,0x26,0x38,0xb4,0x00} + }, + { + /* No.13 delta:6473 weight:513 */ + 11213, + 16, + 1, + 19, + {(0x00000000), + (0x2310a4f2), + (0x91c8cf8c), + (0xb2d86b7e), + (0x89c000d1), + (0xaad0a423), + (0x1808cf5d), + (0x3b186baf), + (0x0000f299), + (0x2310566b), + (0x91c83d15), + (0xb2d899e7), + (0x89c0f248), + (0xaad056ba), + (0x18083dc4), + (0x3b189936)}, + {(0x00000000), + (0x4c020000), + (0xa6800000), + (0xea820000), + (0x15000000), + (0x59020000), + (0xb3800000), + (0xff820000), + (0x29201e00), + (0x65221e00), + (0x8fa01e00), + (0xc3a21e00), + (0x3c201e00), + (0x70221e00), + (0x9aa01e00), + (0xd6a21e00)}, + {(0x3f800000), + (0x3fa60100), + (0x3fd34000), + (0x3ff54100), + (0x3f8a8000), + (0x3fac8100), + (0x3fd9c000), + (0x3fffc100), + (0x3f94900f), + (0x3fb2910f), + (0x3fc7d00f), + (0x3fe1d10f), + (0x3f9e100f), + (0x3fb8110f), + (0x3fcd500f), + (0x3feb510f)}, + (0xfff80000), + {0x89,0x7c,0x3c,0xc6,0x3d,0x0a,0xad,0xf2,0xa0,0xf5, + 0x0a,0x4c,0x4e,0x83,0xc5,0x45,0xdb,0xcb,0xb5,0x98,0x00} + }, + { + /* No.14 delta:1389 weight:725 */ + 11213, + 63, + 14, + 18, + {(0x00000000), + 
(0x81c7909c), + (0xecb46b0c), + (0x6d73fb90), + (0xf33000ef), + (0x72f79073), + (0x1f846be3), + (0x9e43fb7f), + (0x0000e902), + (0x81c7799e), + (0xecb4820e), + (0x6d731292), + (0xf330e9ed), + (0x72f77971), + (0x1f8482e1), + (0x9e43127d)}, + {(0x00000000), + (0x09001000), + (0x02010000), + (0x0b011000), + (0x00201800), + (0x09200800), + (0x02211800), + (0x0b210800), + (0x30039e00), + (0x39038e00), + (0x32029e00), + (0x3b028e00), + (0x30238600), + (0x39239600), + (0x32228600), + (0x3b229600)}, + {(0x3f800000), + (0x3f848008), + (0x3f810080), + (0x3f858088), + (0x3f80100c), + (0x3f849004), + (0x3f81108c), + (0x3f859084), + (0x3f9801cf), + (0x3f9c81c7), + (0x3f99014f), + (0x3f9d8147), + (0x3f9811c3), + (0x3f9c91cb), + (0x3f991143), + (0x3f9d914b)}, + (0xfff80000), + {0xb7,0x0b,0xee,0xcb,0xc6,0x29,0x7c,0xe3,0xc5,0x58, + 0x71,0xe7,0x4b,0xe5,0xc9,0x63,0xbd,0xa4,0x75,0xc7,0x00} + }, + { + /* No.15 delta:8365 weight:1135 */ + 11213, + 38, + 28, + 1, + {(0x00000000), + (0xe56c7b3b), + (0x759d951b), + (0x90f1ee20), + (0x1a6000fa), + (0xff0c7bc1), + (0x6ffd95e1), + (0x8a91eeda), + (0x00004474), + (0xe56c3f4f), + (0x759dd16f), + (0x90f1aa54), + (0x1a60448e), + (0xff0c3fb5), + (0x6ffdd195), + (0x8a91aaae)}, + {(0x00000000), + (0xc0410a00), + (0xe70c0400), + (0x274d0e00), + (0xfa3e8000), + (0x3a7f8a00), + (0x1d328400), + (0xdd738e00), + (0x20801e00), + (0xe0c11400), + (0xc78c1a00), + (0x07cd1000), + (0xdabe9e00), + (0x1aff9400), + (0x3db29a00), + (0xfdf39000)}, + {(0x3f800000), + (0x3fe02085), + (0x3ff38602), + (0x3f93a687), + (0x3ffd1f40), + (0x3f9d3fc5), + (0x3f8e9942), + (0x3feeb9c7), + (0x3f90400f), + (0x3ff0608a), + (0x3fe3c60d), + (0x3f83e688), + (0x3fed5f4f), + (0x3f8d7fca), + (0x3f9ed94d), + (0x3ffef9c8)}, + (0xfff80000), + {0xd2,0xa4,0x8f,0xf8,0x54,0x0f,0xe3,0x14,0x17,0xeb, + 0x17,0x78,0x41,0x78,0x69,0xbb,0x4b,0x01,0x85,0x8e,0x00} + }, + { + /* No.16 delta:6253 weight:641 */ + 11213, + 3, + 30, + 2, + {(0x00000000), + (0x83a2a3cb), + (0x52e0a64c), + (0xd1420587), + (0x9850010b), + (0x1bf2a2c0), + (0xcab0a747), + (0x4912048c), + (0x0000aaa5), + (0x83a2096e), + (0x52e00ce9), + (0xd142af22), + (0x9850abae), + (0x1bf20865), + (0xcab00de2), + (0x4912ae29)}, + {(0x00000000), + (0xe9100000), + (0x94d00000), + (0x7dc00000), + (0xdbcc0000), + (0x32dc0000), + (0x4f1c0000), + (0xa60c0000), + (0x09d01e00), + (0xe0c01e00), + (0x9d001e00), + (0x74101e00), + (0xd21c1e00), + (0x3b0c1e00), + (0x46cc1e00), + (0xafdc1e00)}, + {(0x3f800000), + (0x3ff48800), + (0x3fca6800), + (0x3fbee000), + (0x3fede600), + (0x3f996e00), + (0x3fa78e00), + (0x3fd30600), + (0x3f84e80f), + (0x3ff0600f), + (0x3fce800f), + (0x3fba080f), + (0x3fe90e0f), + (0x3f9d860f), + (0x3fa3660f), + (0x3fd7ee0f)}, + (0xfff80000), + {0xa6,0xc6,0x12,0x64,0xaf,0x79,0x40,0x50,0x7b,0xb9, + 0xa9,0xfa,0x9a,0xc0,0x7a,0xaf,0x63,0x83,0x39,0x6c,0x00} + }, + { + /* No.17 delta:5329 weight:467 */ + 11213, + 55, + 1, + 16, + {(0x00000000), + (0x2ce39cc8), + (0x6312ea87), + (0x4ff1764f), + (0x9ef00113), + (0xb2139ddb), + (0xfde2eb94), + (0xd101775c), + (0x00007ddc), + (0x2ce3e114), + (0x6312975b), + (0x4ff10b93), + (0x9ef07ccf), + (0xb213e007), + (0xfde29648), + (0xd1010a80)}, + {(0x00000000), + (0x0c021000), + (0xe3400000), + (0xef421000), + (0x02000000), + (0x0e021000), + (0xe1400000), + (0xed421000), + (0x50a01e00), + (0x5ca20e00), + (0xb3e01e00), + (0xbfe20e00), + (0x52a01e00), + (0x5ea20e00), + (0xb1e01e00), + (0xbde20e00)}, + {(0x3f800000), + (0x3f860108), + (0x3ff1a000), + (0x3ff7a108), + (0x3f810000), + (0x3f870108), + (0x3ff0a000), + (0x3ff6a108), + 
(0x3fa8500f), + (0x3fae5107), + (0x3fd9f00f), + (0x3fdff107), + (0x3fa9500f), + (0x3faf5107), + (0x3fd8f00f), + (0x3fdef107)}, + (0xfff80000), + {0xe4,0xb8,0x1d,0x0f,0x15,0xe2,0xd6,0xd2,0x30,0x67, + 0x18,0x06,0x38,0xd0,0x63,0x09,0x4d,0xc1,0xaa,0x48,0x00} + }, + { + /* No.18 delta:3046 weight:699 */ + 11213, + 9, + 9, + 15, + {(0x00000000), + (0x228294e5), + (0xc1861e7b), + (0xe3048a9e), + (0x6250012a), + (0x40d295cf), + (0xa3d61f51), + (0x81548bb4), + (0x00005800), + (0x2282cce5), + (0xc186467b), + (0xe304d29e), + (0x6250592a), + (0x40d2cdcf), + (0xa3d64751), + (0x8154d3b4)}, + {(0x00000000), + (0x57910c00), + (0xd07cc400), + (0x87edc800), + (0xa8036800), + (0xff926400), + (0x787fac00), + (0x2feea000), + (0x00521e00), + (0x57c31200), + (0xd02eda00), + (0x87bfd600), + (0xa8517600), + (0xffc07a00), + (0x782db200), + (0x2fbcbe00)}, + {(0x3f800000), + (0x3fabc886), + (0x3fe83e62), + (0x3fc3f6e4), + (0x3fd401b4), + (0x3fffc932), + (0x3fbc3fd6), + (0x3f97f750), + (0x3f80290f), + (0x3fabe189), + (0x3fe8176d), + (0x3fc3dfeb), + (0x3fd428bb), + (0x3fffe03d), + (0x3fbc16d9), + (0x3f97de5f)}, + (0xfff80000), + {0x3d,0xb7,0x41,0x11,0xf8,0x6e,0xe6,0x89,0x71,0x95, + 0x3b,0x7c,0x3a,0x38,0x60,0x12,0x4c,0xec,0xaa,0x83,0x00} + }, + { + /* No.19 delta:786 weight:1431 */ + 11213, + 75, + 17, + 6, + {(0x00000000), + (0x819ee5dc), + (0x4ffaa079), + (0xce6445a5), + (0xe6300137), + (0x67aee4eb), + (0xa9caa14e), + (0x28544492), + (0x00008a74), + (0x819e6fa8), + (0x4ffa2a0d), + (0xce64cfd1), + (0xe6308b43), + (0x67ae6e9f), + (0xa9ca2b3a), + (0x2854cee6)}, + {(0x00000000), + (0x98018000), + (0x20039800), + (0xb8021800), + (0x40104000), + (0xd811c000), + (0x6013d800), + (0xf8125800), + (0x00033e00), + (0x9802be00), + (0x2000a600), + (0xb8012600), + (0x40137e00), + (0xd812fe00), + (0x6010e600), + (0xf8116600)}, + {(0x3f800000), + (0x3fcc00c0), + (0x3f9001cc), + (0x3fdc010c), + (0x3fa00820), + (0x3fec08e0), + (0x3fb009ec), + (0x3ffc092c), + (0x3f80019f), + (0x3fcc015f), + (0x3f900053), + (0x3fdc0093), + (0x3fa009bf), + (0x3fec097f), + (0x3fb00873), + (0x3ffc08b3)}, + (0xfff80000), + {0x90,0xc1,0xc9,0xe7,0xea,0xaf,0xd9,0x5b,0x1f,0x93, + 0x8d,0xcd,0x9b,0xf4,0xbb,0x91,0x58,0xb2,0x03,0x70,0x00} + }, + { + /* No.20 delta:846 weight:909 */ + 11213, + 70, + 15, + 6, + {(0x00000000), + (0xc56d80c5), + (0x4bb862e9), + (0x8ed5e22c), + (0x92e00143), + (0x578d8186), + (0xd95863aa), + (0x1c35e36f), + (0x000095b5), + (0xc56d1570), + (0x4bb8f75c), + (0x8ed57799), + (0x92e094f6), + (0x578d1433), + (0xd958f61f), + (0x1c3576da)}, + {(0x00000000), + (0x20642000), + (0x2530d800), + (0x0554f800), + (0x01a05200), + (0x21c47200), + (0x24908a00), + (0x04f4aa00), + (0x00521e00), + (0x20363e00), + (0x2562c600), + (0x0506e600), + (0x01f24c00), + (0x21966c00), + (0x24c29400), + (0x04a6b400)}, + {(0x3f800000), + (0x3f903210), + (0x3f92986c), + (0x3f82aa7c), + (0x3f80d029), + (0x3f90e239), + (0x3f924845), + (0x3f827a55), + (0x3f80290f), + (0x3f901b1f), + (0x3f92b163), + (0x3f828373), + (0x3f80f926), + (0x3f90cb36), + (0x3f92614a), + (0x3f82535a)}, + (0xfff80000), + {0xd6,0xb1,0xe1,0x27,0x40,0x54,0x48,0x67,0x71,0xe5, + 0x2a,0x38,0xbf,0xa7,0x93,0x82,0x17,0xcd,0x71,0x33,0x00} + }, + { + /* No.21 delta:1909 weight:729 */ + 11213, + 63, + 15, + 17, + {(0x00000000), + (0x3c495454), + (0xf3a1ddf0), + (0xcfe889a4), + (0x1b200159), + (0x2769550d), + (0xe881dca9), + (0xd4c888fd), + (0x0000cd3b), + (0x3c49996f), + (0xf3a110cb), + (0xcfe8449f), + (0x1b20cc62), + (0x27699836), + (0xe8811192), + (0xd4c845c6)}, + {(0x00000000), + (0x08027000), + (0x9651d400), + 
(0x9e53a400), + (0x0040a400), + (0x0842d400), + (0x96117000), + (0x9e130000), + (0x006e9e00), + (0x086cee00), + (0x963f4a00), + (0x9e3d3a00), + (0x002e3a00), + (0x082c4a00), + (0x967fee00), + (0x9e7d9e00)}, + {(0x3f800000), + (0x3f840138), + (0x3fcb28ea), + (0x3fcf29d2), + (0x3f802052), + (0x3f84216a), + (0x3fcb08b8), + (0x3fcf0980), + (0x3f80374f), + (0x3f843677), + (0x3fcb1fa5), + (0x3fcf1e9d), + (0x3f80171d), + (0x3f841625), + (0x3fcb3ff7), + (0x3fcf3ecf)}, + (0xfff80000), + {0xc8,0xdc,0x70,0x0a,0x23,0xa5,0x20,0x35,0xa2,0xd7, + 0xd1,0x53,0xfc,0xe0,0xc1,0xde,0xc0,0x2b,0x65,0x4d,0x00} + }, + { + /* No.22 delta:2420 weight:985 */ + 11213, + 32, + 7, + 15, + {(0x00000000), + (0xd25d9ecf), + (0xb1288d77), + (0x637513b8), + (0xc3700160), + (0x112d9faf), + (0x72588c17), + (0xa00512d8), + (0x000093d5), + (0xd25d0d1a), + (0xb1281ea2), + (0x6375806d), + (0xc37092b5), + (0x112d0c7a), + (0x72581fc2), + (0xa005810d)}, + {(0x00000000), + (0x10134200), + (0x028b0000), + (0x12984200), + (0x50480000), + (0x405b4200), + (0x52c30000), + (0x42d04200), + (0x10ee5e00), + (0x00fd1c00), + (0x12655e00), + (0x02761c00), + (0x40a65e00), + (0x50b51c00), + (0x422d5e00), + (0x523e1c00)}, + {(0x3f800000), + (0x3f8809a1), + (0x3f814580), + (0x3f894c21), + (0x3fa82400), + (0x3fa02da1), + (0x3fa96180), + (0x3fa16821), + (0x3f88772f), + (0x3f807e8e), + (0x3f8932af), + (0x3f813b0e), + (0x3fa0532f), + (0x3fa85a8e), + (0x3fa116af), + (0x3fa91f0e)}, + (0xfff80000), + {0xd0,0xc2,0xd1,0x3e,0x09,0x41,0xc7,0x9c,0x09,0x33, + 0x25,0x5f,0x84,0x21,0xc4,0xcc,0xc5,0x43,0x2a,0x14,0x00} + }, + { + /* No.23 delta:985 weight:863 */ + 11213, + 70, + 12, + 10, + {(0x00000000), + (0x6e6163b2), + (0xa3cecdea), + (0xcdafae58), + (0xb670017d), + (0xd81162cf), + (0x15becc97), + (0x7bdfaf25), + (0x0000d554), + (0x6e61b6e6), + (0xa3ce18be), + (0xcdaf7b0c), + (0xb670d429), + (0xd811b79b), + (0x15be19c3), + (0x7bdf7a71)}, + {(0x00000000), + (0x0338d800), + (0x00545800), + (0x036c8000), + (0x01521000), + (0x026ac800), + (0x01064800), + (0x023e9000), + (0x0020de00), + (0x03180600), + (0x00748600), + (0x034c5e00), + (0x0172ce00), + (0x024a1600), + (0x01269600), + (0x021e4e00)}, + {(0x3f800000), + (0x3f819c6c), + (0x3f802a2c), + (0x3f81b640), + (0x3f80a908), + (0x3f813564), + (0x3f808324), + (0x3f811f48), + (0x3f80106f), + (0x3f818c03), + (0x3f803a43), + (0x3f81a62f), + (0x3f80b967), + (0x3f81250b), + (0x3f80934b), + (0x3f810f27)}, + (0xfff80000), + {0x57,0xda,0x1e,0x1e,0x0f,0xc1,0x5c,0xac,0x32,0x8f, + 0xe4,0x94,0x24,0xf0,0x86,0x24,0x1b,0xde,0x44,0x16,0x00} + }, + { + /* No.24 delta:1573 weight:1043 */ + 11213, + 58, + 21, + 2, + {(0x00000000), + (0xa6c80c3d), + (0x3502d818), + (0x93cad425), + (0xda700189), + (0x7cb80db4), + (0xef72d991), + (0x49bad5ac), + (0x0000cb8c), + (0xa6c8c7b1), + (0x35021394), + (0x93ca1fa9), + (0xda70ca05), + (0x7cb8c638), + (0xef72121d), + (0x49ba1e20)}, + {(0x00000000), + (0xd66f0400), + (0x014ca600), + (0xd723a200), + (0x00442a00), + (0xd62b2e00), + (0x01088c00), + (0xd7678800), + (0x0c835e00), + (0xdaec5a00), + (0x0dcff800), + (0xdba0fc00), + (0x0cc77400), + (0xdaa87000), + (0x0d8bd200), + (0xdbe4d600)}, + {(0x3f800000), + (0x3feb3782), + (0x3f80a653), + (0x3feb91d1), + (0x3f802215), + (0x3feb1597), + (0x3f808446), + (0x3febb3c4), + (0x3f8641af), + (0x3fed762d), + (0x3f86e7fc), + (0x3fedd07e), + (0x3f8663ba), + (0x3fed5438), + (0x3f86c5e9), + (0x3fedf26b)}, + (0xfff80000), + {0xd3,0x5b,0x43,0x3f,0x00,0x55,0xf8,0x40,0xfe,0x8f, + 0xd0,0xe0,0xac,0x48,0x50,0xf2,0x50,0x2a,0xb7,0xcf,0x00} + }, + { + /* No.25 delta:1860 
weight:1169 */ + 11213, + 33, + 7, + 10, + {(0x00000000), + (0x3f9ccf17), + (0xcdd7a2a1), + (0xf24b6db6), + (0xf9c00195), + (0xc65cce82), + (0x3417a334), + (0x0b8b6c23), + (0x00006c32), + (0x3f9ca325), + (0xcdd7ce93), + (0xf24b0184), + (0xf9c06da7), + (0xc65ca2b0), + (0x3417cf06), + (0x0b8b0011)}, + {(0x00000000), + (0x043a0400), + (0x00714000), + (0x044b4400), + (0x10408400), + (0x147a8000), + (0x1031c400), + (0x140bc000), + (0x70049e00), + (0x743e9a00), + (0x7075de00), + (0x744fda00), + (0x60441a00), + (0x647e1e00), + (0x60355a00), + (0x640f5e00)}, + {(0x3f800000), + (0x3f821d02), + (0x3f8038a0), + (0x3f8225a2), + (0x3f882042), + (0x3f8a3d40), + (0x3f8818e2), + (0x3f8a05e0), + (0x3fb8024f), + (0x3fba1f4d), + (0x3fb83aef), + (0x3fba27ed), + (0x3fb0220d), + (0x3fb23f0f), + (0x3fb01aad), + (0x3fb207af)}, + (0xfff80000), + {0x6f,0x6a,0x15,0x14,0x9e,0x4e,0xca,0xcd,0xe1,0xfb, + 0x26,0x23,0xa0,0xa9,0x21,0x22,0xca,0x8c,0x68,0x8f,0x00} + }, + { + /* No.26 delta:2274 weight:1019 */ + 11213, + 18, + 7, + 13, + {(0x00000000), + (0x83f4f835), + (0x34e8f3fa), + (0xb71c0bcf), + (0xf21001a3), + (0x71e4f996), + (0xc6f8f259), + (0x450c0a6c), + (0x00009d49), + (0x83f4657c), + (0x34e86eb3), + (0xb71c9686), + (0xf2109cea), + (0x71e464df), + (0xc6f86f10), + (0x450c9725)}, + {(0x00000000), + (0x69050000), + (0x0b0a1800), + (0x620f1800), + (0x41a06000), + (0x28a56000), + (0x4aaa7800), + (0x23af7800), + (0x046b1e00), + (0x6d6e1e00), + (0x0f610600), + (0x66640600), + (0x45cb7e00), + (0x2cce7e00), + (0x4ec16600), + (0x27c46600)}, + {(0x3f800000), + (0x3fb48280), + (0x3f85850c), + (0x3fb1078c), + (0x3fa0d030), + (0x3f9452b0), + (0x3fa5553c), + (0x3f91d7bc), + (0x3f82358f), + (0x3fb6b70f), + (0x3f87b083), + (0x3fb33203), + (0x3fa2e5bf), + (0x3f96673f), + (0x3fa760b3), + (0x3f93e233)}, + (0xfff80000), + {0x6c,0xda,0xe3,0x3d,0xac,0x72,0x40,0x41,0xca,0xa3, + 0xf7,0xdf,0xff,0x2d,0x57,0xea,0x1f,0x8d,0x6c,0x04,0x00} + }, + { + /* No.27 delta:2503 weight:711 */ + 11213, + 9, + 12, + 13, + {(0x00000000), + (0xc54f7f87), + (0x6904a950), + (0xac4bd6d7), + (0x85c001bc), + (0x408f7e3b), + (0xecc4a8ec), + (0x298bd76b), + (0x00007b77), + (0xc54f04f0), + (0x6904d227), + (0xac4bada0), + (0x85c07acb), + (0x408f054c), + (0xecc4d39b), + (0x298bac1c)}, + {(0x00000000), + (0xc7002c00), + (0x5a61c800), + (0x9d61e400), + (0x12013800), + (0xd5011400), + (0x4860f000), + (0x8f60dc00), + (0xc8c01e00), + (0x0fc03200), + (0x92a1d600), + (0x55a1fa00), + (0xdac12600), + (0x1dc10a00), + (0x80a0ee00), + (0x47a0c200)}, + {(0x3f800000), + (0x3fe38016), + (0x3fad30e4), + (0x3fceb0f2), + (0x3f89009c), + (0x3fea808a), + (0x3fa43078), + (0x3fc7b06e), + (0x3fe4600f), + (0x3f87e019), + (0x3fc950eb), + (0x3faad0fd), + (0x3fed6093), + (0x3f8ee085), + (0x3fc05077), + (0x3fa3d061)}, + (0xfff80000), + {0xd6,0xd3,0xee,0x8e,0x9a,0x94,0x64,0x4c,0x3a,0x7b, + 0x1d,0x2f,0x64,0xef,0x67,0x33,0x0d,0x49,0x2e,0x20,0x00} + }, + { + /* No.28 delta:1642 weight:1597 */ + 11213, + 14, + 16, + 3, + {(0x00000000), + (0x8147d21a), + (0x627aba0f), + (0xe33d6815), + (0x279001c1), + (0xa6d7d3db), + (0x45eabbce), + (0xc4ad69d4), + (0x0000f4d1), + (0x814726cb), + (0x627a4ede), + (0xe33d9cc4), + (0x2790f510), + (0xa6d7270a), + (0x45ea4f1f), + (0xc4ad9d05)}, + {(0x00000000), + (0x210c6a00), + (0x0071b800), + (0x217dd200), + (0xcc021a00), + (0xed0e7000), + (0xcc73a200), + (0xed7fc800), + (0x41b2de00), + (0x60beb400), + (0x41c36600), + (0x60cf0c00), + (0x8db0c400), + (0xacbcae00), + (0x8dc17c00), + (0xaccd1600)}, + {(0x3f800000), + (0x3f908635), + (0x3f8038dc), + (0x3f90bee9), + 
(0x3fe6010d), + (0x3ff68738), + (0x3fe639d1), + (0x3ff6bfe4), + (0x3fa0d96f), + (0x3fb05f5a), + (0x3fa0e1b3), + (0x3fb06786), + (0x3fc6d862), + (0x3fd65e57), + (0x3fc6e0be), + (0x3fd6668b)}, + (0xfff80000), + {0x58,0xce,0x35,0x39,0xc0,0x04,0xad,0xd6,0xdd,0xb7, + 0x5d,0xf1,0xac,0xf9,0x85,0xea,0x3f,0x74,0x52,0x6d,0x00} + }, + { + /* No.29 delta:1888 weight:1763 */ + 11213, + 91, + 4, + 2, + {(0x00000000), + (0xa620066e), + (0x498ca8fb), + (0xefacae95), + (0xc3f001d3), + (0x65d007bd), + (0x8a7ca928), + (0x2c5caf46), + (0x0000927c), + (0xa6209412), + (0x498c3a87), + (0xefac3ce9), + (0xc3f093af), + (0x65d095c1), + (0x8a7c3b54), + (0x2c5c3d3a)}, + {(0x00000000), + (0x29909200), + (0x0074c600), + (0x29e45400), + (0x400e7000), + (0x699ee200), + (0x407ab600), + (0x69ea2400), + (0x44029e00), + (0x6d920c00), + (0x44765800), + (0x6de6ca00), + (0x040cee00), + (0x2d9c7c00), + (0x04782800), + (0x2de8ba00)}, + {(0x3f800000), + (0x3f94c849), + (0x3f803a63), + (0x3f94f22a), + (0x3fa00738), + (0x3fb4cf71), + (0x3fa03d5b), + (0x3fb4f512), + (0x3fa2014f), + (0x3fb6c906), + (0x3fa23b2c), + (0x3fb6f365), + (0x3f820677), + (0x3f96ce3e), + (0x3f823c14), + (0x3f96f45d)}, + (0xfff80000), + {0xbf,0x14,0xa8,0x90,0x29,0x75,0x15,0xde,0x68,0x12, + 0x39,0xb3,0x6e,0x2f,0xcb,0x19,0x1f,0xe6,0x2c,0x9b,0x00} + }, + { + /* No.30 delta:1194 weight:873 */ + 11213, + 90, + 10, + 14, + {(0x00000000), + (0xdd216696), + (0x738a5c01), + (0xaeab3a97), + (0x402001ec), + (0x9d01677a), + (0x33aa5ded), + (0xee8b3b7b), + (0x0000c5c9), + (0xdd21a35f), + (0x738a99c8), + (0xaeabff5e), + (0x4020c425), + (0x9d01a2b3), + (0x33aa9824), + (0xee8bfeb2)}, + {(0x00000000), + (0x193a0000), + (0x07a49000), + (0x1e9e9000), + (0x00658800), + (0x195f8800), + (0x07c11800), + (0x1efb1800), + (0x4456fe00), + (0x5d6cfe00), + (0x43f26e00), + (0x5ac86e00), + (0x44337600), + (0x5d097600), + (0x4397e600), + (0x5aade600)}, + {(0x3f800000), + (0x3f8c9d00), + (0x3f83d248), + (0x3f8f4f48), + (0x3f8032c4), + (0x3f8cafc4), + (0x3f83e08c), + (0x3f8f7d8c), + (0x3fa22b7f), + (0x3faeb67f), + (0x3fa1f937), + (0x3fad6437), + (0x3fa219bb), + (0x3fae84bb), + (0x3fa1cbf3), + (0x3fad56f3)}, + (0xfff80000), + {0x45,0xbe,0xc0,0x65,0xca,0x28,0x44,0x0e,0x06,0x4e, + 0xa0,0xbf,0x48,0xa8,0x8e,0x39,0x64,0x68,0xe1,0x30,0x00} + }, + { + /* No.31 delta:1509 weight:1313 */ + 11213, + 86, + 6, + 7, + {(0x00000000), + (0x41d73f34), + (0x0930d7f4), + (0x48e7e8c0), + (0x65b001f9), + (0x24673ecd), + (0x6c80d60d), + (0x2d57e939), + (0x00008ad2), + (0x41d7b5e6), + (0x09305d26), + (0x48e76212), + (0x65b08b2b), + (0x2467b41f), + (0x6c805cdf), + (0x2d5763eb)}, + {(0x00000000), + (0x02010000), + (0x10332000), + (0x12322000), + (0x10030000), + (0x12020000), + (0x00302000), + (0x02312000), + (0x403a9e00), + (0x423b9e00), + (0x5009be00), + (0x5208be00), + (0x50399e00), + (0x52389e00), + (0x400abe00), + (0x420bbe00)}, + {(0x3f800000), + (0x3f810080), + (0x3f881990), + (0x3f891910), + (0x3f880180), + (0x3f890100), + (0x3f801810), + (0x3f811890), + (0x3fa01d4f), + (0x3fa11dcf), + (0x3fa804df), + (0x3fa9045f), + (0x3fa81ccf), + (0x3fa91c4f), + (0x3fa0055f), + (0x3fa105df)}, + (0xfff80000), + {0x9f,0xb2,0x3a,0xbb,0xab,0xbd,0xc1,0x43,0x82,0x94, + 0x1f,0x8e,0xdf,0x89,0xbb,0xf6,0xee,0x6a,0x10,0x54,0x00} + }, + { + /* No.32 delta:3454 weight:869 */ + 11213, + 36, + 3, + 15, + {(0x00000000), + (0x1709ea04), + (0x8e9b8c43), + (0x99926647), + (0xdd100203), + (0xca19e807), + (0x538b8e40), + (0x44826444), + (0x0000f3b3), + (0x170919b7), + (0x8e9b7ff0), + (0x999295f4), + (0xdd10f1b0), + (0xca191bb4), + (0x538b7df3), + 
(0x448297f7)}, + {(0x00000000), + (0x28228000), + (0x1fc10000), + (0x37e38000), + (0x00400000), + (0x28628000), + (0x1f810000), + (0x37a38000), + (0x12b01e00), + (0x3a929e00), + (0x0d711e00), + (0x25539e00), + (0x12f01e00), + (0x3ad29e00), + (0x0d311e00), + (0x25139e00)}, + {(0x3f800000), + (0x3f941140), + (0x3f8fe080), + (0x3f9bf1c0), + (0x3f802000), + (0x3f943140), + (0x3f8fc080), + (0x3f9bd1c0), + (0x3f89580f), + (0x3f9d494f), + (0x3f86b88f), + (0x3f92a9cf), + (0x3f89780f), + (0x3f9d694f), + (0x3f86988f), + (0x3f9289cf)}, + (0xfff80000), + {0xbd,0x88,0xcf,0x70,0x9b,0x4f,0x1b,0xe7,0xe6,0xab, + 0x62,0x27,0x66,0xaf,0x36,0xbb,0x10,0x8b,0x27,0xd7,0x00} + }, + { + /* No.33 delta:1105 weight:1263 */ + 11213, + 40, + 9, + 3, + {(0x00000000), + (0x4406f6da), + (0xf592d5cb), + (0xb1942311), + (0xe0900211), + (0xa496f4cb), + (0x1502d7da), + (0x51042100), + (0x0000075b), + (0x4406f181), + (0xf592d290), + (0xb194244a), + (0xe090054a), + (0xa496f390), + (0x1502d081), + (0x5104265b)}, + {(0x00000000), + (0x10151000), + (0x10640000), + (0x00711000), + (0x20081800), + (0x301d0800), + (0x306c1800), + (0x20790800), + (0x00025e00), + (0x10174e00), + (0x10665e00), + (0x00734e00), + (0x200a4600), + (0x301f5600), + (0x306e4600), + (0x207b5600)}, + {(0x3f800000), + (0x3f880a88), + (0x3f883200), + (0x3f803888), + (0x3f90040c), + (0x3f980e84), + (0x3f98360c), + (0x3f903c84), + (0x3f80012f), + (0x3f880ba7), + (0x3f88332f), + (0x3f8039a7), + (0x3f900523), + (0x3f980fab), + (0x3f983723), + (0x3f903dab)}, + (0xfff80000), + {0x13,0x48,0x32,0x45,0xca,0x30,0xd6,0xa0,0xae,0xd3, + 0x68,0x38,0x03,0x61,0x73,0x81,0x24,0xd8,0x43,0xf3,0x00} + }, + { + /* No.34 delta:7663 weight:713 */ + 11213, + 62, + 2, + 15, + {(0x00000000), + (0x9fe7bf95), + (0x7e1ef3d4), + (0xe1f94c41), + (0x32e00221), + (0xad07bdb4), + (0x4cfef1f5), + (0xd3194e60), + (0x00005fc6), + (0x9fe7e053), + (0x7e1eac12), + (0xe1f91387), + (0x32e05de7), + (0xad07e272), + (0x4cfeae33), + (0xd31911a6)}, + {(0x00000000), + (0x00000000), + (0x00000000), + (0x00000000), + (0x20000000), + (0x20000000), + (0x20000000), + (0x20000000), + (0x50001e00), + (0x50001e00), + (0x50001e00), + (0x50001e00), + (0x70001e00), + (0x70001e00), + (0x70001e00), + (0x70001e00)}, + {(0x3f800000), + (0x3f800000), + (0x3f800000), + (0x3f800000), + (0x3f900000), + (0x3f900000), + (0x3f900000), + (0x3f900000), + (0x3fa8000f), + (0x3fa8000f), + (0x3fa8000f), + (0x3fa8000f), + (0x3fb8000f), + (0x3fb8000f), + (0x3fb8000f), + (0x3fb8000f)}, + (0xfff80000), + {0x05,0x3c,0xe6,0xed,0x20,0xd0,0x5e,0x82,0xc3,0x47, + 0x89,0x62,0x9a,0x00,0x4f,0xdf,0x8a,0xe2,0x89,0xe4,0x00} + }, + { + /* No.35 delta:1045 weight:1387 */ + 11213, + 66, + 18, + 8, + {(0x00000000), + (0xcc054daa), + (0x64ce00f1), + (0xa8cb4d5b), + (0xc6200238), + (0x0a254f92), + (0xa2ee02c9), + (0x6eeb4f63), + (0x00006fce), + (0xcc052264), + (0x64ce6f3f), + (0xa8cb2295), + (0xc6206df6), + (0x0a25205c), + (0xa2ee6d07), + (0x6eeb20ad)}, + {(0x00000000), + (0xd07d1a00), + (0x08927800), + (0xd8ef6200), + (0x00a4c000), + (0xd0d9da00), + (0x0836b800), + (0xd84ba200), + (0x00025e00), + (0xd07f4400), + (0x08902600), + (0xd8ed3c00), + (0x00a69e00), + (0xd0db8400), + (0x0834e600), + (0xd849fc00)}, + {(0x3f800000), + (0x3fe83e8d), + (0x3f84493c), + (0x3fec77b1), + (0x3f805260), + (0x3fe86ced), + (0x3f841b5c), + (0x3fec25d1), + (0x3f80012f), + (0x3fe83fa2), + (0x3f844813), + (0x3fec769e), + (0x3f80534f), + (0x3fe86dc2), + (0x3f841a73), + (0x3fec24fe)}, + (0xfff80000), + {0x53,0xdb,0x48,0x5e,0x80,0xb8,0xf7,0x7d,0xd4,0x43, + 
0xc8,0x11,0x1f,0x88,0x87,0x4f,0x05,0x24,0x64,0x44,0x00} + }, + { + /* No.36 delta:2076 weight:1703 */ + 11213, + 75, + 9, + 2, + {(0x00000000), + (0xc42d5e09), + (0x3ecbb703), + (0xfae6e90a), + (0xe9b00243), + (0x2d9d5c4a), + (0xd77bb540), + (0x1356eb49), + (0x00003832), + (0xc42d663b), + (0x3ecb8f31), + (0xfae6d138), + (0xe9b03a71), + (0x2d9d6478), + (0xd77b8d72), + (0x1356d37b)}, + {(0x00000000), + (0x003c8000), + (0x00700000), + (0x004c8000), + (0x00080000), + (0x00348000), + (0x00780000), + (0x00448000), + (0x00021e00), + (0x003e9e00), + (0x00721e00), + (0x004e9e00), + (0x000a1e00), + (0x00369e00), + (0x007a1e00), + (0x00469e00)}, + {(0x3f800000), + (0x3f801e40), + (0x3f803800), + (0x3f802640), + (0x3f800400), + (0x3f801a40), + (0x3f803c00), + (0x3f802240), + (0x3f80010f), + (0x3f801f4f), + (0x3f80390f), + (0x3f80274f), + (0x3f80050f), + (0x3f801b4f), + (0x3f803d0f), + (0x3f80234f)}, + (0xfff80000), + {0x83,0xeb,0x26,0xe2,0x25,0xfd,0x8b,0x40,0xfc,0xd8, + 0x09,0x63,0xa1,0xb0,0x60,0x56,0xec,0x05,0xc2,0x27,0x00} + }, + { + /* No.37 delta:2017 weight:1375 */ + 11213, + 20, + 23, + 4, + {(0x00000000), + (0xf3dee608), + (0x43968c18), + (0xb0486a10), + (0x82100252), + (0x71cee45a), + (0xc1868e4a), + (0x32586842), + (0x0000b8e9), + (0xf3de5ee1), + (0x439634f1), + (0xb048d2f9), + (0x8210babb), + (0x71ce5cb3), + (0xc18636a3), + (0x3258d0ab)}, + {(0x00000000), + (0x0f350000), + (0x08b40000), + (0x07810000), + (0x20c1a000), + (0x2ff4a000), + (0x2875a000), + (0x2740a000), + (0x201c1e00), + (0x2f291e00), + (0x28a81e00), + (0x279d1e00), + (0x00ddbe00), + (0x0fe8be00), + (0x0869be00), + (0x075cbe00)}, + {(0x3f800000), + (0x3f879a80), + (0x3f845a00), + (0x3f83c080), + (0x3f9060d0), + (0x3f97fa50), + (0x3f943ad0), + (0x3f93a050), + (0x3f900e0f), + (0x3f97948f), + (0x3f94540f), + (0x3f93ce8f), + (0x3f806edf), + (0x3f87f45f), + (0x3f8434df), + (0x3f83ae5f)}, + (0xfff80000), + {0x64,0x1e,0x04,0xb6,0x86,0x5e,0xd5,0x07,0x5c,0xb3, + 0xbe,0x3a,0x1b,0xc8,0xe4,0x45,0x9e,0xc4,0xcf,0x11,0x00} + }, + { + /* No.38 delta:2221 weight:1197 */ + 11213, + 37, + 24, + 7, + {(0x00000000), + (0x4fb7bfce), + (0x968c3f7b), + (0xd93b80b5), + (0x11300267), + (0x5e87bda9), + (0x87bc3d1c), + (0xc80b82d2), + (0x000002b3), + (0x4fb7bd7d), + (0x968c3dc8), + (0xd93b8206), + (0x113000d4), + (0x5e87bf1a), + (0x87bc3faf), + (0xc80b8061)}, + {(0x00000000), + (0x207f2400), + (0x0138c000), + (0x2147e400), + (0x00160000), + (0x20692400), + (0x012ec000), + (0x2151e400), + (0x30127e00), + (0x106d5a00), + (0x312abe00), + (0x11559a00), + (0x30047e00), + (0x107b5a00), + (0x313cbe00), + (0x11439a00)}, + {(0x3f800000), + (0x3f903f92), + (0x3f809c60), + (0x3f90a3f2), + (0x3f800b00), + (0x3f903492), + (0x3f809760), + (0x3f90a8f2), + (0x3f98093f), + (0x3f8836ad), + (0x3f98955f), + (0x3f88aacd), + (0x3f98023f), + (0x3f883dad), + (0x3f989e5f), + (0x3f88a1cd)}, + (0xfff80000), + {0x56,0x6e,0x0e,0xb6,0x39,0x79,0xe8,0x2f,0xdb,0x3a, + 0xb8,0xa6,0xb7,0x15,0xb4,0x26,0x23,0x62,0x6c,0x7b,0x00} + }, + { + /* No.39 delta:3255 weight:871 */ + 11213, + 59, + 27, + 3, + {(0x00000000), + (0xd9be1e48), + (0x75016ef8), + (0xacbf70b0), + (0x0e00027a), + (0xd7be1c32), + (0x7b016c82), + (0xa2bf72ca), + (0x00004209), + (0xd9be5c41), + (0x75012cf1), + (0xacbf32b9), + (0x0e004073), + (0xd7be5e3b), + (0x7b012e8b), + (0xa2bf30c3)}, + {(0x00000000), + (0x14ad8000), + (0x05013000), + (0x11acb000), + (0x092e2000), + (0x1d83a000), + (0x0c2f1000), + (0x18829000), + (0x07501e00), + (0x13fd9e00), + (0x02512e00), + (0x16fcae00), + (0x0e7e3e00), + (0x1ad3be00), + (0x0b7f0e00), + 
(0x1fd28e00)}, + {(0x3f800000), + (0x3f8a56c0), + (0x3f828098), + (0x3f88d658), + (0x3f849710), + (0x3f8ec1d0), + (0x3f861788), + (0x3f8c4148), + (0x3f83a80f), + (0x3f89fecf), + (0x3f812897), + (0x3f8b7e57), + (0x3f873f1f), + (0x3f8d69df), + (0x3f85bf87), + (0x3f8fe947)}, + (0xfff80000), + {0xde,0x46,0xf8,0xe9,0xab,0x16,0xb9,0x71,0xfd,0x07, + 0x49,0x31,0xbb,0x29,0xb9,0xce,0xd3,0x1e,0x43,0x2e,0x00} + }, + { + /* No.40 delta:3532 weight:1325 */ + 11213, + 8, + 11, + 4, + {(0x00000000), + (0xe314ffbd), + (0x98f9b31f), + (0x7bed4ca2), + (0x71b0028c), + (0x92a4fd31), + (0xe949b193), + (0x0a5d4e2e), + (0x0000e823), + (0xe314179e), + (0x98f95b3c), + (0x7beda481), + (0x71b0eaaf), + (0x92a41512), + (0xe94959b0), + (0x0a5da60d)}, + {(0x00000000), + (0x0c000000), + (0x0a800000), + (0x06800000), + (0x16100000), + (0x1a100000), + (0x1c900000), + (0x10900000), + (0x95801e00), + (0x99801e00), + (0x9f001e00), + (0x93001e00), + (0x83901e00), + (0x8f901e00), + (0x89101e00), + (0x85101e00)}, + {(0x3f800000), + (0x3f860000), + (0x3f854000), + (0x3f834000), + (0x3f8b0800), + (0x3f8d0800), + (0x3f8e4800), + (0x3f884800), + (0x3fcac00f), + (0x3fccc00f), + (0x3fcf800f), + (0x3fc9800f), + (0x3fc1c80f), + (0x3fc7c80f), + (0x3fc4880f), + (0x3fc2880f)}, + (0xfff80000), + {0xc3,0xa5,0xf2,0x8c,0xb3,0x3e,0xd2,0x53,0x92,0x0c, + 0x01,0x4a,0x5c,0x0a,0x47,0xe9,0x85,0xa2,0xf5,0x7f,0x00} + }, + { + /* No.41 delta:5222 weight:1281 */ + 11213, + 90, + 27, + 2, + {(0x00000000), + (0xf1807db4), + (0x9f84df41), + (0x6e04a2f5), + (0x04200296), + (0xf5a07f22), + (0x9ba4ddd7), + (0x6a24a063), + (0x00005700), + (0xf1802ab4), + (0x9f848841), + (0x6e04f5f5), + (0x04205596), + (0xf5a02822), + (0x9ba48ad7), + (0x6a24f763)}, + {(0x00000000), + (0xe9519000), + (0x1a06a800), + (0xf3573800), + (0x0d032000), + (0xe452b000), + (0x17058800), + (0xfe541800), + (0x036ade00), + (0xea3b4e00), + (0x196c7600), + (0xf03de600), + (0x0e69fe00), + (0xe7386e00), + (0x146f5600), + (0xfd3ec600)}, + {(0x3f800000), + (0x3ff4a8c8), + (0x3f8d0354), + (0x3ff9ab9c), + (0x3f868190), + (0x3ff22958), + (0x3f8b82c4), + (0x3fff2a0c), + (0x3f81b56f), + (0x3ff51da7), + (0x3f8cb63b), + (0x3ff81ef3), + (0x3f8734ff), + (0x3ff39c37), + (0x3f8a37ab), + (0x3ffe9f63)}, + (0xfff80000), + {0x08,0x0d,0xba,0x61,0x69,0x6b,0x70,0x44,0xbf,0x27, + 0x4a,0xe9,0xcb,0x98,0x26,0xb9,0xb3,0x5d,0x82,0x3d,0x00} + }, + { + /* No.42 delta:1107 weight:1153 */ + 11213, + 48, + 16, + 10, + {(0x00000000), + (0x6adad7b8), + (0x649ee9f9), + (0x0e443e41), + (0x1b2002a0), + (0x71fad518), + (0x7fbeeb59), + (0x15643ce1), + (0x0000f400), + (0x6ada23b8), + (0x649e1df9), + (0x0e44ca41), + (0x1b20f6a0), + (0x71fa2118), + (0x7fbe1f59), + (0x1564c8e1)}, + {(0x00000000), + (0x09409c00), + (0x0060c000), + (0x09205c00), + (0x04104a00), + (0x0d50d600), + (0x04708a00), + (0x0d301600), + (0x00087e00), + (0x0948e200), + (0x0068be00), + (0x09282200), + (0x04183400), + (0x0d58a800), + (0x0478f400), + (0x0d386800)}, + {(0x3f800000), + (0x3f84a04e), + (0x3f803060), + (0x3f84902e), + (0x3f820825), + (0x3f86a86b), + (0x3f823845), + (0x3f86980b), + (0x3f80043f), + (0x3f84a471), + (0x3f80345f), + (0x3f849411), + (0x3f820c1a), + (0x3f86ac54), + (0x3f823c7a), + (0x3f869c34)}, + (0xfff80000), + {0xb4,0xff,0xa8,0x22,0xa3,0x34,0x7b,0xfd,0xd4,0xca, + 0xef,0x82,0x97,0x4e,0x9d,0x89,0xeb,0xb2,0x55,0x2c,0x00} + }, + { + /* No.43 delta:3289 weight:739 */ + 11213, + 90, + 3, + 15, + {(0x00000000), + (0x12510afd), + (0xc2094a89), + (0xd0584074), + (0x5ce002b7), + (0x4eb1084a), + (0x9ee9483e), + (0x8cb842c3), + (0x00009dee), + (0x12519713), 
+ (0xc209d767), + (0xd058dd9a), + (0x5ce09f59), + (0x4eb195a4), + (0x9ee9d5d0), + (0x8cb8df2d)}, + {(0x00000000), + (0x0c230400), + (0x033c0000), + (0x0f1f0400), + (0x03b08000), + (0x0f938400), + (0x008c8000), + (0x0caf8400), + (0x08705e00), + (0x04535a00), + (0x0b4c5e00), + (0x076f5a00), + (0x0bc0de00), + (0x07e3da00), + (0x08fcde00), + (0x04dfda00)}, + {(0x3f800000), + (0x3f861182), + (0x3f819e00), + (0x3f878f82), + (0x3f81d840), + (0x3f87c9c2), + (0x3f804640), + (0x3f8657c2), + (0x3f84382f), + (0x3f8229ad), + (0x3f85a62f), + (0x3f83b7ad), + (0x3f85e06f), + (0x3f83f1ed), + (0x3f847e6f), + (0x3f826fed)}, + (0xfff80000), + {0x47,0xd8,0xbd,0x66,0xb2,0xe8,0x3c,0x2c,0x45,0x90, + 0x88,0x27,0x9c,0x8f,0x27,0x40,0xc8,0xd9,0x0e,0xd6,0x00} + }, + { + /* No.44 delta:1477 weight:1191 */ + 11213, + 33, + 20, + 2, + {(0x00000000), + (0x3f4d49c2), + (0xdca457bf), + (0xe3e91e7d), + (0x5d5002cf), + (0x621d4b0d), + (0x81f45570), + (0xbeb91cb2), + (0x0000c277), + (0x3f4d8bb5), + (0xdca495c8), + (0xe3e9dc0a), + (0x5d50c0b8), + (0x621d897a), + (0x81f49707), + (0xbeb9dec5)}, + {(0x00000000), + (0x0275b800), + (0x7149c400), + (0x733c7c00), + (0x1058c000), + (0x122d7800), + (0x61110400), + (0x6364bc00), + (0x006f3e00), + (0x021a8600), + (0x7126fa00), + (0x73534200), + (0x1037fe00), + (0x12424600), + (0x617e3a00), + (0x630b8200)}, + {(0x3f800000), + (0x3f813adc), + (0x3fb8a4e2), + (0x3fb99e3e), + (0x3f882c60), + (0x3f8916bc), + (0x3fb08882), + (0x3fb1b25e), + (0x3f80379f), + (0x3f810d43), + (0x3fb8937d), + (0x3fb9a9a1), + (0x3f881bff), + (0x3f892123), + (0x3fb0bf1d), + (0x3fb185c1)}, + (0xfff80000), + {0x43,0xbd,0xc4,0xf1,0xb1,0xd7,0x24,0x47,0xc5,0xd0, + 0xa4,0xbc,0xc9,0x66,0xb3,0x4b,0x26,0xd1,0x26,0x85,0x00} + }, + { + /* No.45 delta:1261 weight:1353 */ + 11213, + 24, + 18, + 5, + {(0x00000000), + (0xabac4677), + (0x903dfbe7), + (0x3b91bd90), + (0xf83002d0), + (0x539c44a7), + (0x680df937), + (0xc3a1bf40), + (0x00004840), + (0xabac0e37), + (0x903db3a7), + (0x3b91f5d0), + (0xf8304a90), + (0x539c0ce7), + (0x680db177), + (0xc3a1f700)}, + {(0x00000000), + (0x00762800), + (0x200acc00), + (0x207ce400), + (0x6ea81000), + (0x6ede3800), + (0x4ea2dc00), + (0x4ed4f400), + (0x004e1e00), + (0x00383600), + (0x2044d200), + (0x2032fa00), + (0x6ee60e00), + (0x6e902600), + (0x4eecc200), + (0x4e9aea00)}, + {(0x3f800000), + (0x3f803b14), + (0x3f900566), + (0x3f903e72), + (0x3fb75408), + (0x3fb76f1c), + (0x3fa7516e), + (0x3fa76a7a), + (0x3f80270f), + (0x3f801c1b), + (0x3f902269), + (0x3f90197d), + (0x3fb77307), + (0x3fb74813), + (0x3fa77661), + (0x3fa74d75)}, + (0xfff80000), + {0x1c,0x79,0xc4,0xaf,0x4a,0xca,0x6f,0x97,0x73,0x87, + 0xe2,0xfa,0x1b,0x02,0x4e,0xfc,0xc2,0xc5,0x85,0xa8,0x00} + }, + { + /* No.46 delta:2871 weight:1351 */ + 11213, + 24, + 4, + 11, + {(0x00000000), + (0x5f7bd2a5), + (0x9b479801), + (0xc43c4aa4), + (0x793002ea), + (0x264bd04f), + (0xe2779aeb), + (0xbd0c484e), + (0x0000737e), + (0x5f7ba1db), + (0x9b47eb7f), + (0xc43c39da), + (0x79307194), + (0x264ba331), + (0xe277e995), + (0xbd0c3b30)}, + {(0x00000000), + (0x0a8c0000), + (0x10810000), + (0x1a0d0000), + (0x00600800), + (0x0aec0800), + (0x10e10800), + (0x1a6d0800), + (0x40101e00), + (0x4a9c1e00), + (0x50911e00), + (0x5a1d1e00), + (0x40701600), + (0x4afc1600), + (0x50f11600), + (0x5a7d1600)}, + {(0x3f800000), + (0x3f854600), + (0x3f884080), + (0x3f8d0680), + (0x3f803004), + (0x3f857604), + (0x3f887084), + (0x3f8d3684), + (0x3fa0080f), + (0x3fa54e0f), + (0x3fa8488f), + (0x3fad0e8f), + (0x3fa0380b), + (0x3fa57e0b), + (0x3fa8788b), + (0x3fad3e8b)}, + (0xfff80000), + 
{0x23,0xc7,0xec,0x47,0xae,0x9d,0xf4,0x29,0xa3,0xce, + 0x13,0x6c,0x06,0x67,0x5b,0x3a,0xb2,0xac,0x33,0xdb,0x00} + }, + { + /* No.47 delta:4370 weight:1245 */ + 11213, + 26, + 30, + 4, + {(0x00000000), + (0x1af999ae), + (0x1bfe6053), + (0x0107f9fd), + (0xb1f002f1), + (0xab099b5f), + (0xaa0e62a2), + (0xb0f7fb0c), + (0x0000281b), + (0x1af9b1b5), + (0x1bfe4848), + (0x0107d1e6), + (0xb1f02aea), + (0xab09b344), + (0xaa0e4ab9), + (0xb0f7d317)}, + {(0x00000000), + (0x0c201000), + (0x03140000), + (0x0f341000), + (0xe4200800), + (0xe8001800), + (0xe7340800), + (0xeb141800), + (0x08001e00), + (0x04200e00), + (0x0b141e00), + (0x07340e00), + (0xec201600), + (0xe0000600), + (0xef341600), + (0xe3140600)}, + {(0x3f800000), + (0x3f861008), + (0x3f818a00), + (0x3f879a08), + (0x3ff21004), + (0x3ff4000c), + (0x3ff39a04), + (0x3ff58a0c), + (0x3f84000f), + (0x3f821007), + (0x3f858a0f), + (0x3f839a07), + (0x3ff6100b), + (0x3ff00003), + (0x3ff79a0b), + (0x3ff18a03)}, + (0xfff80000), + {0x98,0x58,0x06,0x9e,0x81,0x14,0x32,0xd7,0x93,0x32, + 0x65,0xc8,0x13,0x43,0x02,0x53,0x1e,0xd3,0xaf,0x94,0x00} + }, + { + /* No.48 delta:1177 weight:859 */ + 11213, + 75, + 13, + 16, + {(0x00000000), + (0x8e9ecf83), + (0xb438b3ee), + (0x3aa67c6d), + (0xbd800305), + (0x331ecc86), + (0x09b8b0eb), + (0x87267f68), + (0x0000d641), + (0x8e9e19c2), + (0xb43865af), + (0x3aa6aa2c), + (0xbd80d544), + (0x331e1ac7), + (0x09b866aa), + (0x8726a929)}, + {(0x00000000), + (0x0f7d1c00), + (0x76262000), + (0x795b3c00), + (0x41532000), + (0x4e2e3c00), + (0x37750000), + (0x38081c00), + (0x00429e00), + (0x0f3f8200), + (0x7664be00), + (0x7919a200), + (0x4111be00), + (0x4e6ca200), + (0x37379e00), + (0x384a8200)}, + {(0x3f800000), + (0x3f87be8e), + (0x3fbb1310), + (0x3fbcad9e), + (0x3fa0a990), + (0x3fa7171e), + (0x3f9bba80), + (0x3f9c040e), + (0x3f80214f), + (0x3f879fc1), + (0x3fbb325f), + (0x3fbc8cd1), + (0x3fa088df), + (0x3fa73651), + (0x3f9b9bcf), + (0x3f9c2541)}, + (0xfff80000), + {0xc9,0xe0,0x77,0x04,0x4f,0x92,0x96,0xd4,0x05,0xdc, + 0x3e,0x3c,0xa6,0xf3,0x8e,0x2d,0xdb,0xf2,0x8c,0xe4,0x00} + }, + { + /* No.49 delta:3224 weight:947 */ + 11213, + 65, + 7, + 13, + {(0x00000000), + (0x9221eadf), + (0x5003b0d0), + (0xc2225a0f), + (0x0d800317), + (0x9fa1e9c8), + (0x5d83b3c7), + (0xcfa25918), + (0x00005031), + (0x9221baee), + (0x5003e0e1), + (0xc2220a3e), + (0x0d805326), + (0x9fa1b9f9), + (0x5d83e3f6), + (0xcfa20929)}, + {(0x00000000), + (0x00400800), + (0x60600000), + (0x60200800), + (0x00000000), + (0x00400800), + (0x60600000), + (0x60200800), + (0x30101e00), + (0x30501600), + (0x50701e00), + (0x50301600), + (0x30101e00), + (0x30501600), + (0x50701e00), + (0x50301600)}, + {(0x3f800000), + (0x3f802004), + (0x3fb03000), + (0x3fb01004), + (0x3f800000), + (0x3f802004), + (0x3fb03000), + (0x3fb01004), + (0x3f98080f), + (0x3f98280b), + (0x3fa8380f), + (0x3fa8180b), + (0x3f98080f), + (0x3f98280b), + (0x3fa8380f), + (0x3fa8180b)}, + (0xfff80000), + {0xc1,0x70,0x72,0x7f,0xb1,0x1e,0x6e,0xc5,0xec,0xcc, + 0x69,0xd8,0x0b,0xba,0x4e,0x2c,0x73,0x46,0x81,0x52,0x00} + }, + { + /* No.50 delta:6665 weight:1221 */ + 11213, + 84, + 1, + 9, + {(0x00000000), + (0xbdb9b41e), + (0x9876c465), + (0x25cf707b), + (0x25c0032f), + (0x9879b731), + (0xbdb6c74a), + (0x000f7354), + (0x00003009), + (0xbdb98417), + (0x9876f46c), + (0x25cf4072), + (0x25c03326), + (0x98798738), + (0xbdb6f743), + (0x000f435d)}, + {(0x00000000), + (0x51000000), + (0x20000000), + (0x71000000), + (0x04200000), + (0x55200000), + (0x24200000), + (0x75200000), + (0x17201e00), + (0x46201e00), + (0x37201e00), + (0x66201e00), + 
(0x13001e00), + (0x42001e00), + (0x33001e00), + (0x62001e00)}, + {(0x3f800000), + (0x3fa88000), + (0x3f900000), + (0x3fb88000), + (0x3f821000), + (0x3faa9000), + (0x3f921000), + (0x3fba9000), + (0x3f8b900f), + (0x3fa3100f), + (0x3f9b900f), + (0x3fb3100f), + (0x3f89800f), + (0x3fa1000f), + (0x3f99800f), + (0x3fb1000f)}, + (0xfff80000), + {0xdf,0xd6,0xd2,0xb4,0x0a,0xa4,0x35,0xbe,0x0f,0xa6, + 0x48,0x42,0xfe,0x0c,0x0f,0xa3,0xa9,0x64,0x1b,0x93,0x00} + }, + { + /* No.51 delta:6604 weight:1075 */ + 11213, + 61, + 3, + 13, + {(0x00000000), + (0xe7ec9698), + (0xdc943f6e), + (0x3b78a9f6), + (0xdf600330), + (0x388c95a8), + (0x03f43c5e), + (0xe418aac6), + (0x000058c3), + (0xe7ecce5b), + (0xdc9467ad), + (0x3b78f135), + (0xdf605bf3), + (0x388ccd6b), + (0x03f4649d), + (0xe418f205)}, + {(0x00000000), + (0x80000000), + (0x50000000), + (0xd0000000), + (0x20000000), + (0xa0000000), + (0x70000000), + (0xf0000000), + (0x00001e00), + (0x80001e00), + (0x50001e00), + (0xd0001e00), + (0x20001e00), + (0xa0001e00), + (0x70001e00), + (0xf0001e00)}, + {(0x3f800000), + (0x3fc00000), + (0x3fa80000), + (0x3fe80000), + (0x3f900000), + (0x3fd00000), + (0x3fb80000), + (0x3ff80000), + (0x3f80000f), + (0x3fc0000f), + (0x3fa8000f), + (0x3fe8000f), + (0x3f90000f), + (0x3fd0000f), + (0x3fb8000f), + (0x3ff8000f)}, + (0xfff80000), + {0xf2,0x95,0xcc,0x96,0x13,0xa0,0x75,0xc2,0xea,0x1a, + 0x5c,0x2d,0xcc,0x41,0x22,0xf8,0x87,0x8d,0x52,0xc0,0x00} + }, + { + /* No.52 delta:980 weight:1115 */ + 11213, + 75, + 17, + 12, + {(0x00000000), + (0x619af787), + (0x463e8326), + (0x27a474a1), + (0xc5c00348), + (0xa45af4cf), + (0x83fe806e), + (0xe26477e9), + (0x00008ae3), + (0x619a7d64), + (0x463e09c5), + (0x27a4fe42), + (0xc5c089ab), + (0xa45a7e2c), + (0x83fe0a8d), + (0xe264fd0a)}, + {(0x00000000), + (0x107c1400), + (0x81204400), + (0x915c5000), + (0x00113400), + (0x106d2000), + (0x81317000), + (0x914d6400), + (0x000ede00), + (0x1072ca00), + (0x812e9a00), + (0x91528e00), + (0x001fea00), + (0x1063fe00), + (0x813fae00), + (0x9143ba00)}, + {(0x3f800000), + (0x3f883e0a), + (0x3fc09022), + (0x3fc8ae28), + (0x3f80089a), + (0x3f883690), + (0x3fc098b8), + (0x3fc8a6b2), + (0x3f80076f), + (0x3f883965), + (0x3fc0974d), + (0x3fc8a947), + (0x3f800ff5), + (0x3f8831ff), + (0x3fc09fd7), + (0x3fc8a1dd)}, + (0xfff80000), + {0x31,0x3b,0x59,0xba,0x4b,0xfa,0x15,0x6d,0x68,0x38, + 0xd0,0x59,0x4b,0xc5,0xee,0xd6,0xd1,0x72,0x77,0x56,0x00} + }, + { + /* No.53 delta:2128 weight:1031 */ + 11213, + 90, + 18, + 10, + {(0x00000000), + (0x54ddf61b), + (0x3289e26b), + (0x66541470), + (0x8c500359), + (0xd88df542), + (0xbed9e132), + (0xea041729), + (0x0000fb30), + (0x54dd0d2b), + (0x3289195b), + (0x6654ef40), + (0x8c50f869), + (0xd88d0e72), + (0xbed91a02), + (0xea04ec19)}, + {(0x00000000), + (0x80710000), + (0x704e0000), + (0xf03f0000), + (0x40450000), + (0xc0340000), + (0x300b0000), + (0xb07a0000), + (0x000c1e00), + (0x807d1e00), + (0x70421e00), + (0xf0331e00), + (0x40491e00), + (0xc0381e00), + (0x30071e00), + (0xb0761e00)}, + {(0x3f800000), + (0x3fc03880), + (0x3fb82700), + (0x3ff81f80), + (0x3fa02280), + (0x3fe01a00), + (0x3f980580), + (0x3fd83d00), + (0x3f80060f), + (0x3fc03e8f), + (0x3fb8210f), + (0x3ff8198f), + (0x3fa0248f), + (0x3fe01c0f), + (0x3f98038f), + (0x3fd83b0f)}, + (0xfff80000), + {0x2c,0x15,0xc7,0x91,0x8f,0xe7,0xba,0x43,0x56,0x9b, + 0xf8,0xd4,0x59,0xf2,0x0c,0x4e,0xf2,0x42,0x9a,0xfe,0x00} + }, + { + /* No.54 delta:2047 weight:935 */ + 11213, + 73, + 6, + 15, + {(0x00000000), + (0x1b3e4d84), + (0x40583cec), + (0x5b667168), + (0x82600363), + (0x995e4ee7), + 
(0xc2383f8f), + (0xd906720b), + (0x000025e9), + (0x1b3e686d), + (0x40581905), + (0x5b665481), + (0x8260268a), + (0x995e6b0e), + (0xc2381a66), + (0xd90657e2)}, + {(0x00000000), + (0x00524000), + (0x52804000), + (0x52d20000), + (0x00232000), + (0x00716000), + (0x52a36000), + (0x52f12000), + (0x21c51e00), + (0x21975e00), + (0x73455e00), + (0x73171e00), + (0x21e63e00), + (0x21b47e00), + (0x73667e00), + (0x73343e00)}, + {(0x3f800000), + (0x3f802920), + (0x3fa94020), + (0x3fa96900), + (0x3f801190), + (0x3f8038b0), + (0x3fa951b0), + (0x3fa97890), + (0x3f90e28f), + (0x3f90cbaf), + (0x3fb9a2af), + (0x3fb98b8f), + (0x3f90f31f), + (0x3f90da3f), + (0x3fb9b33f), + (0x3fb99a1f)}, + (0xfff80000), + {0x67,0xd5,0xbe,0x19,0x7f,0x4e,0x37,0xca,0xeb,0x0b, + 0x09,0x52,0xc2,0x91,0x23,0x02,0xb9,0xa6,0xdd,0x1e,0x00} + }, + { + /* No.55 delta:1367 weight:1089 */ + 11213, + 30, + 14, + 10, + {(0x00000000), + (0x3d4e7360), + (0xd47d559c), + (0xe93326fc), + (0x10e00371), + (0x2dae7011), + (0xc49d56ed), + (0xf9d3258d), + (0x0000f71b), + (0x3d4e847b), + (0xd47da287), + (0xe933d1e7), + (0x10e0f46a), + (0x2dae870a), + (0xc49da1f6), + (0xf9d3d296)}, + {(0x00000000), + (0x6b729000), + (0x206f1800), + (0x4b1d8800), + (0x00501a00), + (0x6b228a00), + (0x203f0200), + (0x4b4d9200), + (0x20103e00), + (0x4b62ae00), + (0x007f2600), + (0x6b0db600), + (0x20402400), + (0x4b32b400), + (0x002f3c00), + (0x6b5dac00)}, + {(0x3f800000), + (0x3fb5b948), + (0x3f90378c), + (0x3fa58ec4), + (0x3f80280d), + (0x3fb59145), + (0x3f901f81), + (0x3fa5a6c9), + (0x3f90081f), + (0x3fa5b157), + (0x3f803f93), + (0x3fb586db), + (0x3f902012), + (0x3fa5995a), + (0x3f80179e), + (0x3fb5aed6)}, + (0xfff80000), + {0x74,0x06,0xc4,0x94,0x50,0xc8,0x89,0xd9,0x6c,0x24, + 0x1b,0x4e,0x7f,0x96,0xbe,0x0a,0xb0,0xbb,0xa9,0x75,0x00} + }, + { + /* No.56 delta:1232 weight:1251 */ + 11213, + 56, + 20, + 8, + {(0x00000000), + (0x303bfd65), + (0x96ea74b0), + (0xa6d189d5), + (0x1d200385), + (0x2d1bfee0), + (0x8bca7735), + (0xbbf18a50), + (0x0000e0a3), + (0x303b1dc6), + (0x96ea9413), + (0xa6d16976), + (0x1d20e326), + (0x2d1b1e43), + (0x8bca9796), + (0xbbf16af3)}, + {(0x00000000), + (0x0e9c1400), + (0x00661e00), + (0x0efa0a00), + (0x20125000), + (0x2e8e4400), + (0x20744e00), + (0x2ee85a00), + (0x001abe00), + (0x0e86aa00), + (0x007ca000), + (0x0ee0b400), + (0x2008ee00), + (0x2e94fa00), + (0x206ef000), + (0x2ef2e400)}, + {(0x3f800000), + (0x3f874e0a), + (0x3f80330f), + (0x3f877d05), + (0x3f900928), + (0x3f974722), + (0x3f903a27), + (0x3f97742d), + (0x3f800d5f), + (0x3f874355), + (0x3f803e50), + (0x3f87705a), + (0x3f900477), + (0x3f974a7d), + (0x3f903778), + (0x3f977972)}, + (0xfff80000), + {0x9f,0xb8,0x5c,0x52,0x53,0x38,0x40,0xed,0x9b,0x43, + 0xe8,0x89,0x8c,0xd0,0x44,0x25,0x4b,0xbf,0xca,0xd0,0x00} + }, + { + /* No.57 delta:1379 weight:873 */ + 11213, + 89, + 13, + 1, + {(0x00000000), + (0xfacfdd0b), + (0x22ecafbf), + (0xd82372b4), + (0x85500390), + (0x7f9fde9b), + (0xa7bcac2f), + (0x5d737124), + (0x0000d423), + (0xfacf0928), + (0x22ec7b9c), + (0xd823a697), + (0x8550d7b3), + (0x7f9f0ab8), + (0xa7bc780c), + (0x5d73a507)}, + {(0x00000000), + (0x2c081000), + (0x05201800), + (0x29280800), + (0x20e00400), + (0x0ce81400), + (0x25c01c00), + (0x09c80c00), + (0x11481e00), + (0x3d400e00), + (0x14680600), + (0x38601600), + (0x31a81a00), + (0x1da00a00), + (0x34880200), + (0x18801200)}, + {(0x3f800000), + (0x3f960408), + (0x3f82900c), + (0x3f949404), + (0x3f907002), + (0x3f86740a), + (0x3f92e00e), + (0x3f84e406), + (0x3f88a40f), + (0x3f9ea007), + (0x3f8a3403), + (0x3f9c300b), + (0x3f98d40d), + 
(0x3f8ed005), + (0x3f9a4401), + (0x3f8c4009)}, + (0xfff80000), + {0x08,0x0f,0x24,0x9d,0x3a,0xba,0x6d,0xb4,0xb0,0x1a, + 0x1c,0x4b,0x44,0xe7,0x44,0xf7,0xc9,0x4a,0x5f,0x97,0x00} + }, + { + /* No.58 delta:904 weight:1097 */ + 11213, + 60, + 16, + 2, + {(0x00000000), + (0xda86476d), + (0x6c05798d), + (0xb6833ee0), + (0xb72003ae), + (0x6da644c3), + (0xdb257a23), + (0x01a33d4e), + (0x00001489), + (0xda8653e4), + (0x6c056d04), + (0xb6832a69), + (0xb7201727), + (0x6da6504a), + (0xdb256eaa), + (0x01a329c7)}, + {(0x00000000), + (0x0c74f000), + (0x0059a800), + (0x0c2d5800), + (0x02445000), + (0x0e30a000), + (0x021df800), + (0x0e690800), + (0x0902de00), + (0x05762e00), + (0x095b7600), + (0x052f8600), + (0x0b468e00), + (0x07327e00), + (0x0b1f2600), + (0x076bd600)}, + {(0x3f800000), + (0x3f863a78), + (0x3f802cd4), + (0x3f8616ac), + (0x3f812228), + (0x3f871850), + (0x3f810efc), + (0x3f873484), + (0x3f84816f), + (0x3f82bb17), + (0x3f84adbb), + (0x3f8297c3), + (0x3f85a347), + (0x3f83993f), + (0x3f858f93), + (0x3f83b5eb)}, + (0xfff80000), + {0xb8,0xe0,0xa0,0x3b,0x2e,0x11,0x17,0x83,0x7b,0x75, + 0x85,0x89,0xfa,0x1e,0x49,0xb4,0x5c,0xe6,0xb8,0xc1,0x00} + }, + { + /* No.59 delta:7724 weight:1539 */ + 11213, + 42, + 28, + 4, + {(0x00000000), + (0xdfefef22), + (0x804185a4), + (0x5fae6a86), + (0x54b003b3), + (0x8b5fec91), + (0xd4f18617), + (0x0b1e6935), + (0x00009211), + (0xdfef7d33), + (0x804117b5), + (0x5faef897), + (0x54b091a2), + (0x8b5f7e80), + (0xd4f11406), + (0x0b1efb24)}, + {(0x00000000), + (0x38020000), + (0x08014000), + (0x30034000), + (0x04000800), + (0x3c020800), + (0x0c014800), + (0x34034800), + (0x08001e00), + (0x30021e00), + (0x00015e00), + (0x38035e00), + (0x0c001600), + (0x34021600), + (0x04015600), + (0x3c035600)}, + {(0x3f800000), + (0x3f9c0100), + (0x3f8400a0), + (0x3f9801a0), + (0x3f820004), + (0x3f9e0104), + (0x3f8600a4), + (0x3f9a01a4), + (0x3f84000f), + (0x3f98010f), + (0x3f8000af), + (0x3f9c01af), + (0x3f86000b), + (0x3f9a010b), + (0x3f8200ab), + (0x3f9e01ab)}, + (0xfff80000), + {0x91,0xba,0x8c,0xac,0x44,0xa7,0x81,0xa7,0xed,0x37, + 0x7e,0x8d,0xaf,0xe5,0x08,0xe6,0xc7,0x1d,0xfd,0xe5,0x00} + }, + { + /* No.60 delta:2411 weight:1317 */ + 11213, + 13, + 24, + 5, + {(0x00000000), + (0x9dd3ce8f), + (0x90da0878), + (0x0d09c6f7), + (0xad8003cf), + (0x3053cd40), + (0x3d5a0bb7), + (0xa089c538), + (0x0000bcb0), + (0x9dd3723f), + (0x90dab4c8), + (0x0d097a47), + (0xad80bf7f), + (0x305371f0), + (0x3d5ab707), + (0xa0897988)}, + {(0x00000000), + (0x4c0b9800), + (0x0fa54000), + (0x43aed800), + (0x0067c000), + (0x4c6c5800), + (0x0fc28000), + (0x43c91800), + (0x0a105e00), + (0x461bc600), + (0x05b51e00), + (0x49be8600), + (0x0a779e00), + (0x467c0600), + (0x05d2de00), + (0x49d94600)}, + {(0x3f800000), + (0x3fa605cc), + (0x3f87d2a0), + (0x3fa1d76c), + (0x3f8033e0), + (0x3fa6362c), + (0x3f87e140), + (0x3fa1e48c), + (0x3f85082f), + (0x3fa30de3), + (0x3f82da8f), + (0x3fa4df43), + (0x3f853bcf), + (0x3fa33e03), + (0x3f82e96f), + (0x3fa4eca3)}, + (0xfff80000), + {0xac,0x0b,0xb8,0x4d,0x6f,0xbd,0x47,0x8b,0x3f,0x9c, + 0x92,0xd0,0x32,0xf9,0x89,0x38,0xda,0x67,0xea,0xbe,0x00} + }, + { + /* No.61 delta:1265 weight:827 */ + 11213, + 52, + 17, + 15, + {(0x00000000), + (0xa4b47608), + (0x36bc0fca), + (0x920879c2), + (0xdeb003d6), + (0x7a0475de), + (0xe80c0c1c), + (0x4cb87a14), + (0x0000fbcf), + (0xa4b48dc7), + (0x36bcf405), + (0x9208820d), + (0xdeb0f819), + (0x7a048e11), + (0xe80cf7d3), + (0x4cb881db)}, + {(0x00000000), + (0x0813ae00), + (0x20151000), + (0x2806be00), + (0x4029c000), + (0x483a6e00), + (0x603cd000), + (0x682f7e00), 
+ (0x52035e00), + (0x5a10f000), + (0x72164e00), + (0x7a05e000), + (0x122a9e00), + (0x1a393000), + (0x323f8e00), + (0x3a2c2000)}, + {(0x3f800000), + (0x3f8409d7), + (0x3f900a88), + (0x3f94035f), + (0x3fa014e0), + (0x3fa41d37), + (0x3fb01e68), + (0x3fb417bf), + (0x3fa901af), + (0x3fad0878), + (0x3fb90b27), + (0x3fbd02f0), + (0x3f89154f), + (0x3f8d1c98), + (0x3f991fc7), + (0x3f9d1610)}, + (0xfff80000), + {0xb2,0x25,0x08,0xf6,0xcd,0xda,0xfa,0xdb,0x2f,0xe7, + 0x81,0x83,0x7f,0xb8,0x75,0x05,0xff,0x2c,0x1a,0x80,0x00} + }, + { + /* No.62 delta:3397 weight:1223 */ + 11213, + 44, + 3, + 7, + {(0x00000000), + (0x89ffcb3c), + (0xafe67560), + (0x2619be5c), + (0xf79003e8), + (0x7e6fc8d4), + (0x58767688), + (0xd189bdb4), + (0x0000dcdc), + (0x89ff17e0), + (0xafe6a9bc), + (0x26196280), + (0xf790df34), + (0x7e6f1408), + (0x5876aa54), + (0xd1896168)}, + {(0x00000000), + (0x00710000), + (0xb00e0800), + (0xb07f0800), + (0x90120000), + (0x90630000), + (0x201c0800), + (0x206d0800), + (0x40091e00), + (0x40781e00), + (0xf0071600), + (0xf0761600), + (0xd01b1e00), + (0xd06a1e00), + (0x60151600), + (0x60641600)}, + {(0x3f800000), + (0x3f803880), + (0x3fd80704), + (0x3fd83f84), + (0x3fc80900), + (0x3fc83180), + (0x3f900e04), + (0x3f903684), + (0x3fa0048f), + (0x3fa03c0f), + (0x3ff8038b), + (0x3ff83b0b), + (0x3fe80d8f), + (0x3fe8350f), + (0x3fb00a8b), + (0x3fb0320b)}, + (0xfff80000), + {0x65,0x92,0xc9,0x30,0xd4,0x39,0xe2,0xf8,0x3a,0x06, + 0x72,0x5a,0xf0,0xe9,0x6f,0x1d,0x41,0xb8,0x38,0xcb,0x00} + }, + { + /* No.63 delta:1253 weight:1063 */ + 11213, + 40, + 15, + 8, + {(0x00000000), + (0xf0f1fa2b), + (0xc095cbf9), + (0x306431d2), + (0x84c003f6), + (0x7431f9dd), + (0x4455c80f), + (0xb4a43224), + (0x000041b7), + (0xf0f1bb9c), + (0xc0958a4e), + (0x30647065), + (0x84c04241), + (0x7431b86a), + (0x445589b8), + (0xb4a47393)}, + {(0x00000000), + (0x000f4800), + (0x00c47000), + (0x00cb3800), + (0x001ec000), + (0x00118800), + (0x00dab000), + (0x00d5f800), + (0xc01d9e00), + (0xc012d600), + (0xc0d9ee00), + (0xc0d6a600), + (0xc0035e00), + (0xc00c1600), + (0xc0c72e00), + (0xc0c86600)}, + {(0x3f800000), + (0x3f8007a4), + (0x3f806238), + (0x3f80659c), + (0x3f800f60), + (0x3f8008c4), + (0x3f806d58), + (0x3f806afc), + (0x3fe00ecf), + (0x3fe0096b), + (0x3fe06cf7), + (0x3fe06b53), + (0x3fe001af), + (0x3fe0060b), + (0x3fe06397), + (0x3fe06433)}, + (0xfff80000), + {0x6a,0x42,0x24,0x72,0x49,0x2e,0x6c,0xc6,0x39,0xde, + 0x29,0x45,0x5e,0xba,0x3b,0xda,0xc6,0xb7,0x79,0xd2,0x00} + }, + { + /* No.64 delta:5028 weight:833 */ + 11213, + 40, + 3, + 16, + {(0x00000000), + (0x0fda9709), + (0xdb5d90d9), + (0xd48707d0), + (0x78500401), + (0x778a9308), + (0xa30d94d8), + (0xacd703d1), + (0x00000aa4), + (0x0fda9dad), + (0xdb5d9a7d), + (0xd4870d74), + (0x78500ea5), + (0x778a99ac), + (0xa30d9e7c), + (0xacd70975)}, + {(0x00000000), + (0x19200000), + (0x83a00000), + (0x9a800000), + (0x00800000), + (0x19a00000), + (0x83200000), + (0x9a000000), + (0x41001e00), + (0x58201e00), + (0xc2a01e00), + (0xdb801e00), + (0x41801e00), + (0x58a01e00), + (0xc2201e00), + (0xdb001e00)}, + {(0x3f800000), + (0x3f8c9000), + (0x3fc1d000), + (0x3fcd4000), + (0x3f804000), + (0x3f8cd000), + (0x3fc19000), + (0x3fcd0000), + (0x3fa0800f), + (0x3fac100f), + (0x3fe1500f), + (0x3fedc00f), + (0x3fa0c00f), + (0x3fac500f), + (0x3fe1100f), + (0x3fed800f)}, + (0xfff80000), + {0x7f,0xa7,0x79,0xc1,0x02,0x5a,0xb9,0x48,0xd7,0xc2, + 0x1b,0xde,0x64,0x28,0xca,0x0c,0xa1,0xb8,0xbd,0x71,0x00} + }, + { + /* No.65 delta:1199 weight:1181 */ + 11213, + 35, + 20, + 9, + {(0x00000000), + (0xf97f5fb7), + 
(0x820665df), + (0x7b793a68), + (0xeda00417), + (0x14df5ba0), + (0x6fa661c8), + (0x96d93e7f), + (0x0000be8a), + (0xf97fe13d), + (0x8206db55), + (0x7b7984e2), + (0xeda0ba9d), + (0x14dfe52a), + (0x6fa6df42), + (0x96d980f5)}, + {(0x00000000), + (0x0c163000), + (0x000c1000), + (0x0c1a2000), + (0xc0028000), + (0xcc14b000), + (0xc00e9000), + (0xcc18a000), + (0x7101de00), + (0x7d17ee00), + (0x710dce00), + (0x7d1bfe00), + (0xb1035e00), + (0xbd156e00), + (0xb10f4e00), + (0xbd197e00)}, + {(0x3f800000), + (0x3f860b18), + (0x3f800608), + (0x3f860d10), + (0x3fe00140), + (0x3fe60a58), + (0x3fe00748), + (0x3fe60c50), + (0x3fb880ef), + (0x3fbe8bf7), + (0x3fb886e7), + (0x3fbe8dff), + (0x3fd881af), + (0x3fde8ab7), + (0x3fd887a7), + (0x3fde8cbf)}, + (0xfff80000), + {0x44,0x09,0x7e,0x16,0x6a,0x0b,0xbf,0xd6,0xf6,0x83, + 0xc1,0x33,0x24,0x8d,0x24,0x40,0xc5,0xee,0xb0,0xd2,0x00} + }, + { + /* No.66 delta:1553 weight:539 */ + 11213, + 26, + 13, + 19, + {(0x00000000), + (0xc75eb6dc), + (0xfa4bc661), + (0x3d1570bd), + (0x6f600428), + (0xa83eb2f4), + (0x952bc249), + (0x52757495), + (0x0000df90), + (0xc75e694c), + (0xfa4b19f1), + (0x3d15af2d), + (0x6f60dbb8), + (0xa83e6d64), + (0x952b1dd9), + (0x5275ab05)}, + {(0x00000000), + (0x00521800), + (0x20647400), + (0x20366c00), + (0x60dc1800), + (0x608e0000), + (0x40b86c00), + (0x40ea7400), + (0x10341e00), + (0x10660600), + (0x30506a00), + (0x30027200), + (0x70e80600), + (0x70ba1e00), + (0x508c7200), + (0x50de6a00)}, + {(0x3f800000), + (0x3f80290c), + (0x3f90323a), + (0x3f901b36), + (0x3fb06e0c), + (0x3fb04700), + (0x3fa05c36), + (0x3fa0753a), + (0x3f881a0f), + (0x3f883303), + (0x3f982835), + (0x3f980139), + (0x3fb87403), + (0x3fb85d0f), + (0x3fa84639), + (0x3fa86f35)}, + (0xfff80000), + {0xe3,0x81,0xfe,0x0e,0xae,0xa6,0x17,0x99,0xb5,0xdc, + 0x36,0xa3,0xa9,0x84,0x5e,0xac,0xaf,0x0b,0xa6,0x5e,0x00} + }, + { + /* No.67 delta:1370 weight:1349 */ + 11213, + 50, + 8, + 4, + {(0x00000000), + (0xf2eee92e), + (0x184a48a6), + (0xeaa4a188), + (0x3cb00439), + (0xce5eed17), + (0x24fa4c9f), + (0xd614a5b1), + (0x0000c236), + (0xf2ee2b18), + (0x184a8a90), + (0xeaa463be), + (0x3cb0c60f), + (0xce5e2f21), + (0x24fa8ea9), + (0xd6146787)}, + {(0x00000000), + (0x40028000), + (0x70014000), + (0x3003c000), + (0x20002000), + (0x6002a000), + (0x50016000), + (0x1003e000), + (0x10001e00), + (0x50029e00), + (0x60015e00), + (0x2003de00), + (0x30003e00), + (0x7002be00), + (0x40017e00), + (0x0003fe00)}, + {(0x3f800000), + (0x3fa00140), + (0x3fb800a0), + (0x3f9801e0), + (0x3f900010), + (0x3fb00150), + (0x3fa800b0), + (0x3f8801f0), + (0x3f88000f), + (0x3fa8014f), + (0x3fb000af), + (0x3f9001ef), + (0x3f98001f), + (0x3fb8015f), + (0x3fa000bf), + (0x3f8001ff)}, + (0xfff80000), + {0xce,0xab,0xcf,0x53,0x4e,0xc1,0xc1,0xd0,0x64,0x84, + 0xd7,0xc3,0xad,0x13,0xad,0xf4,0xf7,0x14,0xe1,0xd6,0x00} + }, + { + /* No.68 delta:1769 weight:1053 */ + 11213, + 44, + 23, + 7, + {(0x00000000), + (0x1c10f784), + (0xb81f8711), + (0xa40f7095), + (0xd0d00449), + (0xccc0f3cd), + (0x68cf8358), + (0x74df74dc), + (0x00006582), + (0x1c109206), + (0xb81fe293), + (0xa40f1517), + (0xd0d061cb), + (0xccc0964f), + (0x68cfe6da), + (0x74df115e)}, + {(0x00000000), + (0x009da000), + (0x40746000), + (0x40e9c000), + (0x00028400), + (0x009f2400), + (0x4076e400), + (0x40eb4400), + (0x700bbe00), + (0x70961e00), + (0x307fde00), + (0x30e27e00), + (0x70093a00), + (0x70949a00), + (0x307d5a00), + (0x30e0fa00)}, + {(0x3f800000), + (0x3f804ed0), + (0x3fa03a30), + (0x3fa074e0), + (0x3f800142), + (0x3f804f92), + (0x3fa03b72), + (0x3fa075a2), + (0x3fb805df), + 
(0x3fb84b0f), + (0x3f983fef), + (0x3f98713f), + (0x3fb8049d), + (0x3fb84a4d), + (0x3f983ead), + (0x3f98707d)}, + (0xfff80000), + {0x25,0xc5,0x2b,0x7e,0x94,0x70,0x50,0xab,0x68,0x79, + 0xe0,0x34,0x49,0x1e,0xd3,0x2a,0xe2,0x9f,0xef,0x26,0x00} + }, + { + /* No.69 delta:1887 weight:1253 */ + 11213, + 64, + 9, + 8, + {(0x00000000), + (0x4cbf4fb8), + (0xcc209ffc), + (0x809fd044), + (0x7e700455), + (0x32cf4bed), + (0xb2509ba9), + (0xfeefd411), + (0x0000612d), + (0x4cbf2e95), + (0xcc20fed1), + (0x809fb169), + (0x7e706578), + (0x32cf2ac0), + (0xb250fa84), + (0xfeefb53c)}, + {(0x00000000), + (0x50090000), + (0x00035000), + (0x500a5000), + (0x04019000), + (0x54089000), + (0x0402c000), + (0x540bc000), + (0x05027e00), + (0x550b7e00), + (0x05012e00), + (0x55082e00), + (0x0103ee00), + (0x510aee00), + (0x0100be00), + (0x5109be00)}, + {(0x3f800000), + (0x3fa80480), + (0x3f8001a8), + (0x3fa80528), + (0x3f8200c8), + (0x3faa0448), + (0x3f820160), + (0x3faa05e0), + (0x3f82813f), + (0x3faa85bf), + (0x3f828097), + (0x3faa8417), + (0x3f8081f7), + (0x3fa88577), + (0x3f80805f), + (0x3fa884df)}, + (0xfff80000), + {0x1f,0x11,0x3a,0x53,0x30,0xf5,0x3a,0x92,0x69,0x8f, + 0x5e,0x40,0x86,0x58,0x16,0xa4,0x7f,0x33,0x04,0xc6,0x00} + }, + { + /* No.70 delta:1651 weight:1401 */ + 11213, + 35, + 9, + 8, + {(0x00000000), + (0xcaa73bcd), + (0x48ffa480), + (0x82589f4d), + (0x0b600460), + (0xc1c73fad), + (0x439fa0e0), + (0x89389b2d), + (0x0000d8fb), + (0xcaa7e336), + (0x48ff7c7b), + (0x825847b6), + (0x0b60dc9b), + (0xc1c7e756), + (0x439f781b), + (0x893843d6)}, + {(0x00000000), + (0x90604200), + (0x20222000), + (0xb0426200), + (0x5023a800), + (0xc043ea00), + (0x70018800), + (0xe061ca00), + (0x16031e00), + (0x86635c00), + (0x36213e00), + (0xa6417c00), + (0x4620b600), + (0xd640f400), + (0x66029600), + (0xf662d400)}, + {(0x3f800000), + (0x3fc83021), + (0x3f901110), + (0x3fd82131), + (0x3fa811d4), + (0x3fe021f5), + (0x3fb800c4), + (0x3ff030e5), + (0x3f8b018f), + (0x3fc331ae), + (0x3f9b109f), + (0x3fd320be), + (0x3fa3105b), + (0x3feb207a), + (0x3fb3014b), + (0x3ffb316a)}, + (0xfff80000), + {0xf7,0xac,0x3b,0x82,0x60,0x2e,0xa2,0x38,0xd5,0xbd, + 0x9e,0x07,0x19,0x05,0x37,0x68,0xa8,0x2b,0xcc,0x5b,0x00} + }, + { + /* No.71 delta:805 weight:1559 */ + 11213, + 85, + 17, + 2, + {(0x00000000), + (0x4ea39f4b), + (0xeedcca5e), + (0xa07f5515), + (0x1a80047d), + (0x54239b36), + (0xf45cce23), + (0xbaff5168), + (0x000051a3), + (0x4ea3cee8), + (0xeedc9bfd), + (0xa07f04b6), + (0x1a8055de), + (0x5423ca95), + (0xf45c9f80), + (0xbaff00cb)}, + {(0x00000000), + (0x0879e800), + (0x10141400), + (0x186dfc00), + (0x006c0800), + (0x0815e000), + (0x10781c00), + (0x1801f400), + (0x40237e00), + (0x485a9600), + (0x50376a00), + (0x584e8200), + (0x404f7600), + (0x48369e00), + (0x505b6200), + (0x58228a00)}, + {(0x3f800000), + (0x3f843cf4), + (0x3f880a0a), + (0x3f8c36fe), + (0x3f803604), + (0x3f840af0), + (0x3f883c0e), + (0x3f8c00fa), + (0x3fa011bf), + (0x3fa42d4b), + (0x3fa81bb5), + (0x3fac2741), + (0x3fa027bb), + (0x3fa41b4f), + (0x3fa82db1), + (0x3fac1145)}, + (0xfff80000), + {0x81,0x81,0xe0,0x37,0x51,0x1b,0x87,0x01,0xa3,0x40, + 0x70,0xd0,0xb9,0x63,0xbb,0xc0,0xe9,0x49,0x0b,0x62,0x00} + }, + { + /* No.72 delta:1929 weight:1383 */ + 11213, + 77, + 8, + 10, + {(0x00000000), + (0x9625646a), + (0xa06390e5), + (0x3646f48f), + (0x6c400487), + (0xfa6560ed), + (0xcc239462), + (0x5a06f008), + (0x00006232), + (0x96250658), + (0xa063f2d7), + (0x364696bd), + (0x6c4066b5), + (0xfa6502df), + (0xcc23f650), + (0x5a06923a)}, + {(0x00000000), + (0x60350000), + (0x08a08000), + (0x68958000), + 
(0x00120000), + (0x60270000), + (0x08b28000), + (0x68878000), + (0x60021e00), + (0x00371e00), + (0x68a29e00), + (0x08979e00), + (0x60101e00), + (0x00251e00), + (0x68b09e00), + (0x08859e00)}, + {(0x3f800000), + (0x3fb01a80), + (0x3f845040), + (0x3fb44ac0), + (0x3f800900), + (0x3fb01380), + (0x3f845940), + (0x3fb443c0), + (0x3fb0010f), + (0x3f801b8f), + (0x3fb4514f), + (0x3f844bcf), + (0x3fb0080f), + (0x3f80128f), + (0x3fb4584f), + (0x3f8442cf)}, + (0xfff80000), + {0xec,0xe8,0xd6,0x3f,0xf0,0x4f,0x6e,0xc9,0x92,0xb9, + 0x0b,0xf3,0x46,0x01,0x83,0x06,0xba,0xbb,0xfc,0x3c,0x00} + }, + { + /* No.73 delta:1312 weight:1037 */ + 11213, + 79, + 14, + 13, + {(0x00000000), + (0x3c3b4194), + (0x3b4a522a), + (0x077113be), + (0x70e00495), + (0x4cdb4501), + (0x4baa56bf), + (0x7791172b), + (0x00001842), + (0x3c3b59d6), + (0x3b4a4a68), + (0x07710bfc), + (0x70e01cd7), + (0x4cdb5d43), + (0x4baa4efd), + (0x77910f69)}, + {(0x00000000), + (0x5e1d3000), + (0x08002800), + (0x561d1800), + (0x00404000), + (0x5e5d7000), + (0x08406800), + (0x565d5800), + (0xa7841e00), + (0xf9992e00), + (0xaf843600), + (0xf1990600), + (0xa7c45e00), + (0xf9d96e00), + (0xafc47600), + (0xf1d94600)}, + {(0x3f800000), + (0x3faf0e98), + (0x3f840014), + (0x3fab0e8c), + (0x3f802020), + (0x3faf2eb8), + (0x3f842034), + (0x3fab2eac), + (0x3fd3c20f), + (0x3ffccc97), + (0x3fd7c21b), + (0x3ff8cc83), + (0x3fd3e22f), + (0x3ffcecb7), + (0x3fd7e23b), + (0x3ff8eca3)}, + (0xfff80000), + {0xe5,0x84,0x28,0x6a,0xc7,0xb8,0xff,0x35,0x7e,0x4a, + 0xd0,0x29,0x99,0x65,0xc9,0xf9,0x00,0xb5,0xa5,0x73,0x00} + }, + { + /* No.74 delta:1274 weight:753 */ + 11213, + 83, + 10, + 18, + {(0x00000000), + (0xdb6fd096), + (0x7bead936), + (0xa08509a0), + (0x059004a2), + (0xdeffd434), + (0x7e7add94), + (0xa5150d02), + (0x00003dd4), + (0xdb6fed42), + (0x7beae4e2), + (0xa0853474), + (0x05903976), + (0xdeffe9e0), + (0x7e7ae040), + (0xa51530d6)}, + {(0x00000000), + (0x0862a000), + (0x20176000), + (0x2875c000), + (0x21182000), + (0x297a8000), + (0x010f4000), + (0x096de000), + (0x100c1e00), + (0x186ebe00), + (0x301b7e00), + (0x3879de00), + (0x31143e00), + (0x39769e00), + (0x11035e00), + (0x1961fe00)}, + {(0x3f800000), + (0x3f843150), + (0x3f900bb0), + (0x3f943ae0), + (0x3f908c10), + (0x3f94bd40), + (0x3f8087a0), + (0x3f84b6f0), + (0x3f88060f), + (0x3f8c375f), + (0x3f980dbf), + (0x3f9c3cef), + (0x3f988a1f), + (0x3f9cbb4f), + (0x3f8881af), + (0x3f8cb0ff)}, + (0xfff80000), + {0x94,0x6b,0xe1,0x97,0xcf,0xd7,0x1d,0x69,0x3d,0xf1, + 0xd3,0x31,0x9b,0x3f,0x74,0x40,0xe4,0x78,0x37,0x93,0x00} + }, + { + /* No.75 delta:3240 weight:999 */ + 11213, + 25, + 3, + 14, + {(0x00000000), + (0x5231ede5), + (0x75f41157), + (0x27c5fcb2), + (0x231004bc), + (0x7121e959), + (0x56e415eb), + (0x04d5f80e), + (0x000014a2), + (0x5231f947), + (0x75f405f5), + (0x27c5e810), + (0x2310101e), + (0x7121fdfb), + (0x56e40149), + (0x04d5ecac)}, + {(0x00000000), + (0x09c10000), + (0x50f28000), + (0x59338000), + (0x12424000), + (0x1b834000), + (0x42b0c000), + (0x4b71c000), + (0x02361e00), + (0x0bf71e00), + (0x52c49e00), + (0x5b059e00), + (0x10745e00), + (0x19b55e00), + (0x4086de00), + (0x4947de00)}, + {(0x3f800000), + (0x3f84e080), + (0x3fa87940), + (0x3fac99c0), + (0x3f892120), + (0x3f8dc1a0), + (0x3fa15860), + (0x3fa5b8e0), + (0x3f811b0f), + (0x3f85fb8f), + (0x3fa9624f), + (0x3fad82cf), + (0x3f883a2f), + (0x3f8cdaaf), + (0x3fa0436f), + (0x3fa4a3ef)}, + (0xfff80000), + {0x22,0xa1,0x8f,0x2e,0x7e,0xb0,0x8f,0x95,0xab,0x7b, + 0xe4,0x15,0x6f,0x5a,0x78,0x2a,0xeb,0x47,0xfe,0xa6,0x00} + }, + { + /* No.76 delta:1884 weight:1457 */ + 11213, 
+ 11, + 13, + 3, + {(0x00000000), + (0xb165bd1d), + (0x1f5b5cd1), + (0xae3ee1cc), + (0x07b004ca), + (0xb6d5b9d7), + (0x18eb581b), + (0xa98ee506), + (0x00000ebc), + (0xb165b3a1), + (0x1f5b526d), + (0xae3eef70), + (0x07b00a76), + (0xb6d5b76b), + (0x18eb56a7), + (0xa98eebba)}, + {(0x00000000), + (0x2a1ef400), + (0x43091000), + (0x6917e400), + (0x05182000), + (0x2f06d400), + (0x46113000), + (0x6c0fc400), + (0x20c41e00), + (0x0adaea00), + (0x63cd0e00), + (0x49d3fa00), + (0x25dc3e00), + (0x0fc2ca00), + (0x66d52e00), + (0x4ccbda00)}, + {(0x3f800000), + (0x3f950f7a), + (0x3fa18488), + (0x3fb48bf2), + (0x3f828c10), + (0x3f97836a), + (0x3fa30898), + (0x3fb607e2), + (0x3f90620f), + (0x3f856d75), + (0x3fb1e687), + (0x3fa4e9fd), + (0x3f92ee1f), + (0x3f87e165), + (0x3fb36a97), + (0x3fa665ed)}, + (0xfff80000), + {0x5f,0x66,0x78,0x0e,0x66,0x3f,0x37,0x9d,0x75,0xa2, + 0x49,0x78,0xb7,0x8c,0xf1,0x7c,0x84,0xf3,0xab,0x84,0x00} + }, + { + /* No.77 delta:1037 weight:1439 */ + 11213, + 43, + 15, + 7, + {(0x00000000), + (0x3b7738f7), + (0x2ee7f42b), + (0x1590ccdc), + (0x8c3004de), + (0xb7473c29), + (0xa2d7f0f5), + (0x99a0c802), + (0x0000b3f9), + (0x3b778b0e), + (0x2ee747d2), + (0x15907f25), + (0x8c30b727), + (0xb7478fd0), + (0xa2d7430c), + (0x99a07bfb)}, + {(0x00000000), + (0x20009400), + (0x70030800), + (0x50039c00), + (0x90011600), + (0xb0018200), + (0xe0021e00), + (0xc0028a00), + (0x40005e00), + (0x6000ca00), + (0x30035600), + (0x1003c200), + (0xd0014800), + (0xf001dc00), + (0xa0024000), + (0x8002d400)}, + {(0x3f800000), + (0x3f90004a), + (0x3fb80184), + (0x3fa801ce), + (0x3fc8008b), + (0x3fd800c1), + (0x3ff0010f), + (0x3fe00145), + (0x3fa0002f), + (0x3fb00065), + (0x3f9801ab), + (0x3f8801e1), + (0x3fe800a4), + (0x3ff800ee), + (0x3fd00120), + (0x3fc0016a)}, + (0xfff80000), + {0x4d,0xd9,0x1c,0xbc,0x61,0x94,0x41,0xb3,0x64,0x5a, + 0xe0,0x27,0x21,0x33,0x58,0x8a,0x3a,0x27,0xe7,0x61,0x00} + }, + { + /* No.78 delta:3771 weight:1149 */ + 11213, + 67, + 6, + 13, + {(0x00000000), + (0xeb935e7e), + (0x12dd9004), + (0xf94ece7a), + (0x518004ec), + (0xba135a92), + (0x435d94e8), + (0xa8ceca96), + (0x00002915), + (0xeb93776b), + (0x12ddb911), + (0xf94ee76f), + (0x51802df9), + (0xba137387), + (0x435dbdfd), + (0xa8cee383)}, + {(0x00000000), + (0x00600000), + (0x005c0000), + (0x003c0000), + (0xa7000000), + (0xa7600000), + (0xa75c0000), + (0xa73c0000), + (0x00381e00), + (0x00581e00), + (0x00641e00), + (0x00041e00), + (0xa7381e00), + (0xa7581e00), + (0xa7641e00), + (0xa7041e00)}, + {(0x3f800000), + (0x3f803000), + (0x3f802e00), + (0x3f801e00), + (0x3fd38000), + (0x3fd3b000), + (0x3fd3ae00), + (0x3fd39e00), + (0x3f801c0f), + (0x3f802c0f), + (0x3f80320f), + (0x3f80020f), + (0x3fd39c0f), + (0x3fd3ac0f), + (0x3fd3b20f), + (0x3fd3820f)}, + (0xfff80000), + {0x61,0xb9,0x79,0x4a,0xf6,0x0b,0x90,0x19,0x41,0xed, + 0xc7,0x02,0x30,0x6e,0x66,0x3a,0xa0,0x21,0x38,0x4d,0x00} + }, + { + /* No.79 delta:8905 weight:995 */ + 11213, + 37, + 2, + 14, + {(0x00000000), + (0xc386d224), + (0xa19cefca), + (0x621a3dee), + (0x73c004f0), + (0xb046d6d4), + (0xd25ceb3a), + (0x11da391e), + (0x00006023), + (0xc386b207), + (0xa19c8fe9), + (0x621a5dcd), + (0x73c064d3), + (0xb046b6f7), + (0xd25c8b19), + (0x11da593d)}, + {(0x00000000), + (0x50000000), + (0x60000000), + (0x30000000), + (0x00000000), + (0x50000000), + (0x60000000), + (0x30000000), + (0x10001e00), + (0x40001e00), + (0x70001e00), + (0x20001e00), + (0x10001e00), + (0x40001e00), + (0x70001e00), + (0x20001e00)}, + {(0x3f800000), + (0x3fa80000), + (0x3fb00000), + (0x3f980000), + (0x3f800000), + (0x3fa80000), + 
(0x3fb00000), + (0x3f980000), + (0x3f88000f), + (0x3fa0000f), + (0x3fb8000f), + (0x3f90000f), + (0x3f88000f), + (0x3fa0000f), + (0x3fb8000f), + (0x3f90000f)}, + (0xfff80000), + {0xcd,0x77,0xa6,0x4e,0x16,0x97,0x82,0x51,0x40,0x8c, + 0x23,0x3d,0x69,0x41,0x55,0xf4,0x6d,0xb5,0x97,0xc4,0x00} + }, + { + /* No.80 delta:1084 weight:1327 */ + 11213, + 41, + 20, + 7, + {(0x00000000), + (0x03b0124c), + (0x2709d148), + (0x24b9c304), + (0xd150050f), + (0xd2e01743), + (0xf659d447), + (0xf5e9c60b), + (0x0000e5a0), + (0x03b0f7ec), + (0x270934e8), + (0x24b926a4), + (0xd150e0af), + (0xd2e0f2e3), + (0xf65931e7), + (0xf5e923ab)}, + {(0x00000000), + (0x58fc3200), + (0x70059800), + (0x28f9aa00), + (0x00443a00), + (0x58b80800), + (0x7041a200), + (0x28bd9000), + (0x60225e00), + (0x38de6c00), + (0x1027c600), + (0x48dbf400), + (0x60666400), + (0x389a5600), + (0x1063fc00), + (0x489fce00)}, + {(0x3f800000), + (0x3fac7e19), + (0x3fb802cc), + (0x3f947cd5), + (0x3f80221d), + (0x3fac5c04), + (0x3fb820d1), + (0x3f945ec8), + (0x3fb0112f), + (0x3f9c6f36), + (0x3f8813e3), + (0x3fa46dfa), + (0x3fb03332), + (0x3f9c4d2b), + (0x3f8831fe), + (0x3fa44fe7)}, + (0xfff80000), + {0x17,0x06,0xb5,0x56,0x08,0x24,0x02,0x45,0x7d,0xec, + 0xaa,0x6e,0xee,0x42,0x3d,0x61,0xec,0x7b,0x95,0xda,0x00} + }, + { + /* No.81 delta:2107 weight:929 */ + 11213, + 68, + 7, + 13, + {(0x00000000), + (0xa2680084), + (0x72c07011), + (0xd0a87095), + (0xe9c00518), + (0x4ba8059c), + (0x9b007509), + (0x3968758d), + (0x000019d6), + (0xa2681952), + (0x72c069c7), + (0xd0a86943), + (0xe9c01cce), + (0x4ba81c4a), + (0x9b006cdf), + (0x39686c5b)}, + {(0x00000000), + (0x06214000), + (0x50182000), + (0x56396000), + (0x004c0000), + (0x066d4000), + (0x50542000), + (0x56756000), + (0x003abe00), + (0x061bfe00), + (0x50229e00), + (0x5603de00), + (0x0076be00), + (0x0657fe00), + (0x506e9e00), + (0x564fde00)}, + {(0x3f800000), + (0x3f8310a0), + (0x3fa80c10), + (0x3fab1cb0), + (0x3f802600), + (0x3f8336a0), + (0x3fa82a10), + (0x3fab3ab0), + (0x3f801d5f), + (0x3f830dff), + (0x3fa8114f), + (0x3fab01ef), + (0x3f803b5f), + (0x3f832bff), + (0x3fa8374f), + (0x3fab27ef)}, + (0xfff80000), + {0x06,0xc1,0x16,0x3a,0x4b,0xf5,0xcc,0xe4,0x06,0xc5, + 0x47,0x64,0xd8,0x26,0x6e,0xa3,0xd2,0xe3,0xfc,0x67,0x00} + }, + { + /* No.82 delta:1653 weight:993 */ + 11213, + 67, + 15, + 1, + {(0x00000000), + (0x205bddc2), + (0x51a5fb05), + (0x71fe26c7), + (0xbc800525), + (0x9cdbd8e7), + (0xed25fe20), + (0xcd7e23e2), + (0x0000ae60), + (0x205b73a2), + (0x51a55565), + (0x71fe88a7), + (0xbc80ab45), + (0x9cdb7687), + (0xed255040), + (0xcd7e8d82)}, + {(0x00000000), + (0x200d8e00), + (0x01004000), + (0x210dce00), + (0x1710c000), + (0x371d4e00), + (0x16108000), + (0x361d0e00), + (0x00409e00), + (0x204d1000), + (0x0140de00), + (0x214d5000), + (0x17505e00), + (0x375dd000), + (0x16501e00), + (0x365d9000)}, + {(0x3f800000), + (0x3f9006c7), + (0x3f808020), + (0x3f9086e7), + (0x3f8b8860), + (0x3f9b8ea7), + (0x3f8b0840), + (0x3f9b0e87), + (0x3f80204f), + (0x3f902688), + (0x3f80a06f), + (0x3f90a6a8), + (0x3f8ba82f), + (0x3f9baee8), + (0x3f8b280f), + (0x3f9b2ec8)}, + (0xfff80000), + {0x31,0x5a,0x14,0x56,0x22,0x69,0x71,0xb4,0xed,0xe9, + 0x57,0x3c,0xb3,0xa3,0xc2,0xd8,0x5c,0x3b,0xf9,0x7a,0x00} + }, + { + /* No.83 delta:2591 weight:1355 */ + 11213, + 78, + 3, + 3, + {(0x00000000), + (0x3fa6f3ae), + (0x11561526), + (0x2ef0e688), + (0x3f40053d), + (0x00e6f693), + (0x2e16101b), + (0x11b0e3b5), + (0x000038dd), + (0x3fa6cb73), + (0x11562dfb), + (0x2ef0de55), + (0x3f403de0), + (0x00e6ce4e), + (0x2e1628c6), + (0x11b0db68)}, + {(0x00000000), + 
(0x88264000), + (0x60420000), + (0xe8644000), + (0x20102000), + (0xa8366000), + (0x40522000), + (0xc8746000), + (0x00791e00), + (0x885f5e00), + (0x603b1e00), + (0xe81d5e00), + (0x20693e00), + (0xa84f7e00), + (0x402b3e00), + (0xc80d7e00)}, + {(0x3f800000), + (0x3fc41320), + (0x3fb02100), + (0x3ff43220), + (0x3f900810), + (0x3fd41b30), + (0x3fa02910), + (0x3fe43a30), + (0x3f803c8f), + (0x3fc42faf), + (0x3fb01d8f), + (0x3ff40eaf), + (0x3f90349f), + (0x3fd427bf), + (0x3fa0159f), + (0x3fe406bf)}, + (0xfff80000), + {0x95,0xde,0xea,0xd2,0x91,0x96,0x3a,0x31,0x18,0xb4, + 0x29,0x3c,0x05,0x83,0x13,0x20,0xa6,0xbb,0xfa,0x90,0x00} + }, + { + /* No.84 delta:1724 weight:1387 */ + 11213, + 31, + 24, + 7, + {(0x00000000), + (0x6f77a064), + (0x9d4e3ea4), + (0xf2399ec0), + (0xdab00543), + (0xb5c7a527), + (0x47fe3be7), + (0x28899b83), + (0x0000ba57), + (0x6f771a33), + (0x9d4e84f3), + (0xf2392497), + (0xdab0bf14), + (0xb5c71f70), + (0x47fe81b0), + (0x288921d4)}, + {(0x00000000), + (0x4052c800), + (0x403d7000), + (0x006fb800), + (0x01748400), + (0x41264c00), + (0x4149f400), + (0x011b3c00), + (0x30183e00), + (0x704af600), + (0x70254e00), + (0x30778600), + (0x316cba00), + (0x713e7200), + (0x7151ca00), + (0x31030200)}, + {(0x3f800000), + (0x3fa02964), + (0x3fa01eb8), + (0x3f8037dc), + (0x3f80ba42), + (0x3fa09326), + (0x3fa0a4fa), + (0x3f808d9e), + (0x3f980c1f), + (0x3fb8257b), + (0x3fb812a7), + (0x3f983bc3), + (0x3f98b65d), + (0x3fb89f39), + (0x3fb8a8e5), + (0x3f988181)}, + (0xfff80000), + {0x24,0x42,0xb5,0x8a,0x26,0x25,0x0b,0x43,0xb5,0xa1, + 0xfd,0xf3,0xef,0xe4,0x58,0x1d,0x2f,0x82,0x54,0xe1,0x00} + }, + { + /* No.85 delta:1315 weight:1167 */ + 11213, + 28, + 18, + 8, + {(0x00000000), + (0x7039a7b2), + (0x655a69d6), + (0x1563ce64), + (0x55800552), + (0x25b9a2e0), + (0x30da6c84), + (0x40e3cb36), + (0x0000db22), + (0x70397c90), + (0x655ab2f4), + (0x15631546), + (0x5580de70), + (0x25b979c2), + (0x30dab7a6), + (0x40e31014)}, + {(0x00000000), + (0x091ae000), + (0xc01fd200), + (0xc9053200), + (0x60072600), + (0x691dc600), + (0xa018f400), + (0xa9021400), + (0x40083e00), + (0x4912de00), + (0x8017ec00), + (0x890d0c00), + (0x200f1800), + (0x2915f800), + (0xe010ca00), + (0xe90a2a00)}, + {(0x3f800000), + (0x3f848d70), + (0x3fe00fe9), + (0x3fe48299), + (0x3fb00393), + (0x3fb48ee3), + (0x3fd00c7a), + (0x3fd4810a), + (0x3fa0041f), + (0x3fa4896f), + (0x3fc00bf6), + (0x3fc48686), + (0x3f90078c), + (0x3f948afc), + (0x3ff00865), + (0x3ff48515)}, + (0xfff80000), + {0x54,0x00,0xe5,0x09,0x83,0x8a,0xa3,0xac,0xe2,0x03, + 0xe4,0x5f,0x87,0xaa,0x93,0x3c,0x49,0xef,0xf1,0x12,0x00} + }, + { + /* No.86 delta:1805 weight:1023 */ + 11213, + 40, + 17, + 1, + {(0x00000000), + (0xd727963d), + (0x760f18b0), + (0xa1288e8d), + (0xe2d00563), + (0x35f7935e), + (0x94df1dd3), + (0x43f88bee), + (0x0000c942), + (0xd7275f7f), + (0x760fd1f2), + (0xa12847cf), + (0xe2d0cc21), + (0x35f75a1c), + (0x94dfd491), + (0x43f842ac)}, + {(0x00000000), + (0x207c0400), + (0x56934000), + (0x76ef4400), + (0x00009e00), + (0x207c9a00), + (0x5693de00), + (0x76efda00), + (0x20009e00), + (0x007c9a00), + (0x7693de00), + (0x56efda00), + (0x20000000), + (0x007c0400), + (0x76934000), + (0x56ef4400)}, + {(0x3f800000), + (0x3f903e02), + (0x3fab49a0), + (0x3fbb77a2), + (0x3f80004f), + (0x3f903e4d), + (0x3fab49ef), + (0x3fbb77ed), + (0x3f90004f), + (0x3f803e4d), + (0x3fbb49ef), + (0x3fab77ed), + (0x3f900000), + (0x3f803e02), + (0x3fbb49a0), + (0x3fab77a2)}, + (0xfff80000), + {0xc8,0x53,0x9a,0xab,0x96,0xd8,0xe7,0x02,0xe6,0xf1, + 0x74,0x36,0x00,0x32,0x32,0x6e,0x36,0xe4,0x78,0x88,0x00} + }, + 
{ + /* No.87 delta:2165 weight:1423 */ + 11213, + 29, + 6, + 7, + {(0x00000000), + (0xb0a4f75d), + (0xd99a0020), + (0x693ef77d), + (0x0010057f), + (0xb0b4f222), + (0xd98a055f), + (0x692ef202), + (0x00002860), + (0xb0a4df3d), + (0xd99a2840), + (0x693edf1d), + (0x00102d1f), + (0xb0b4da42), + (0xd98a2d3f), + (0x692eda62)}, + {(0x00000000), + (0x00c02000), + (0x10102000), + (0x10d00000), + (0x700c0000), + (0x70cc2000), + (0x601c2000), + (0x60dc0000), + (0x00701e00), + (0x00b03e00), + (0x10603e00), + (0x10a01e00), + (0x707c1e00), + (0x70bc3e00), + (0x606c3e00), + (0x60ac1e00)}, + {(0x3f800000), + (0x3f806010), + (0x3f880810), + (0x3f886800), + (0x3fb80600), + (0x3fb86610), + (0x3fb00e10), + (0x3fb06e00), + (0x3f80380f), + (0x3f80581f), + (0x3f88301f), + (0x3f88500f), + (0x3fb83e0f), + (0x3fb85e1f), + (0x3fb0361f), + (0x3fb0560f)}, + (0xfff80000), + {0xc8,0x26,0xe1,0xb8,0xbd,0xd2,0x06,0x99,0x21,0x8b, + 0xa2,0x82,0x21,0x08,0x66,0x43,0x39,0x7d,0x22,0x3f,0x00} + }, + { + /* No.88 delta:2728 weight:929 */ + 11213, + 9, + 5, + 12, + {(0x00000000), + (0xba91b8b7), + (0x1f94657f), + (0xa505ddc8), + (0xc8900581), + (0x7201bd36), + (0xd70460fe), + (0x6d95d849), + (0x00009507), + (0xba912db0), + (0x1f94f078), + (0xa50548cf), + (0xc8909086), + (0x72012831), + (0xd704f5f9), + (0x6d954d4e)}, + {(0x00000000), + (0x74d60000), + (0x48810000), + (0x3c570000), + (0x07840000), + (0x73520000), + (0x4f050000), + (0x3bd30000), + (0xa0019e00), + (0xd4d79e00), + (0xe8809e00), + (0x9c569e00), + (0xa7859e00), + (0xd3539e00), + (0xef049e00), + (0x9bd29e00)}, + {(0x3f800000), + (0x3fba6b00), + (0x3fa44080), + (0x3f9e2b80), + (0x3f83c200), + (0x3fb9a900), + (0x3fa78280), + (0x3f9de980), + (0x3fd000cf), + (0x3fea6bcf), + (0x3ff4404f), + (0x3fce2b4f), + (0x3fd3c2cf), + (0x3fe9a9cf), + (0x3ff7824f), + (0x3fcde94f)}, + (0xfff80000), + {0xef,0x82,0x25,0x92,0x4d,0x84,0x32,0xcc,0x51,0x79, + 0xa6,0xf1,0xa8,0xb8,0xd3,0xc8,0xf4,0x1f,0x61,0x14,0x00} + }, + { + /* No.89 delta:1302 weight:829 */ + 11213, + 52, + 17, + 15, + {(0x00000000), + (0xff9c667b), + (0x076e42cb), + (0xf8f224b0), + (0x5a200593), + (0xa5bc63e8), + (0x5d4e4758), + (0xa2d22123), + (0x0000fbe9), + (0xff9c9d92), + (0x076eb922), + (0xf8f2df59), + (0x5a20fe7a), + (0xa5bc9801), + (0x5d4ebcb1), + (0xa2d2daca)}, + {(0x00000000), + (0x168c2000), + (0x08819000), + (0x1e0db000), + (0x8400d800), + (0x928cf800), + (0x8c814800), + (0x9a0d6800), + (0x0a023e00), + (0x1c8e1e00), + (0x0283ae00), + (0x140f8e00), + (0x8e02e600), + (0x988ec600), + (0x86837600), + (0x900f5600)}, + {(0x3f800000), + (0x3f8b4610), + (0x3f8440c8), + (0x3f8f06d8), + (0x3fc2006c), + (0x3fc9467c), + (0x3fc640a4), + (0x3fcd06b4), + (0x3f85011f), + (0x3f8e470f), + (0x3f8141d7), + (0x3f8a07c7), + (0x3fc70173), + (0x3fcc4763), + (0x3fc341bb), + (0x3fc807ab)}, + (0xfff80000), + {0x44,0x22,0x55,0x0c,0x46,0x02,0x05,0x6b,0x49,0xa7, + 0x79,0xcc,0xa0,0x5b,0x5c,0xeb,0x82,0xb3,0x48,0x1e,0x00} + }, + { + /* No.90 delta:1794 weight:683 */ + 11213, + 71, + 7, + 3, + {(0x00000000), + (0x28fcb37c), + (0x3de27e63), + (0x151ecd1f), + (0x99d005af), + (0xb12cb6d3), + (0xa4327bcc), + (0x8ccec8b0), + (0x0000473d), + (0x28fcf441), + (0x3de2395e), + (0x151e8a22), + (0x99d04292), + (0xb12cf1ee), + (0xa4323cf1), + (0x8cce8f8d)}, + {(0x00000000), + (0x00170000), + (0x00059200), + (0x00129200), + (0x61088000), + (0x611f8000), + (0x610d1200), + (0x611a1200), + (0x22825e00), + (0x22955e00), + (0x2287cc00), + (0x2290cc00), + (0x438ade00), + (0x439dde00), + (0x438f4c00), + (0x43984c00)}, + {(0x3f800000), + (0x3f800b80), + (0x3f8002c9), + 
(0x3f800949), + (0x3fb08440), + (0x3fb08fc0), + (0x3fb08689), + (0x3fb08d09), + (0x3f91412f), + (0x3f914aaf), + (0x3f9143e6), + (0x3f914866), + (0x3fa1c56f), + (0x3fa1ceef), + (0x3fa1c7a6), + (0x3fa1cc26)}, + (0xfff80000), + {0x63,0x96,0x53,0x66,0x89,0x8f,0x5a,0x0f,0x8c,0x80, + 0x0a,0x9a,0x17,0xd9,0x66,0x91,0xf8,0x92,0x97,0x62,0x00} + }, + { + /* No.91 delta:10527 weight:393 */ + 11213, + 5, + 2, + 19, + {(0x00000000), + (0x9ea45304), + (0x56ae02a1), + (0xc80a51a5), + (0x9ad005b9), + (0x047456bd), + (0xcc7e0718), + (0x52da541c), + (0x0000c554), + (0x9ea49650), + (0x56aec7f5), + (0xc80a94f1), + (0x9ad0c0ed), + (0x047493e9), + (0xcc7ec24c), + (0x52da9148)}, + {(0x00000000), + (0x64000000), + (0x0f800000), + (0x6b800000), + (0x00000000), + (0x64000000), + (0x0f800000), + (0x6b800000), + (0x00001e00), + (0x64001e00), + (0x0f801e00), + (0x6b801e00), + (0x00001e00), + (0x64001e00), + (0x0f801e00), + (0x6b801e00)}, + {(0x3f800000), + (0x3fb20000), + (0x3f87c000), + (0x3fb5c000), + (0x3f800000), + (0x3fb20000), + (0x3f87c000), + (0x3fb5c000), + (0x3f80000f), + (0x3fb2000f), + (0x3f87c00f), + (0x3fb5c00f), + (0x3f80000f), + (0x3fb2000f), + (0x3f87c00f), + (0x3fb5c00f)}, + (0xfff80000), + {0xd2,0x73,0xa3,0x8b,0x1a,0x90,0xef,0x94,0xe3,0x4a, + 0x16,0x2f,0x38,0x48,0xd9,0x68,0xf2,0xdf,0xe5,0xab,0x00} + }, + { + /* No.92 delta:2494 weight:1233 */ + 11213, + 7, + 21, + 4, + {(0x00000000), + (0x67c6fdcf), + (0x39165a15), + (0x5ed0a7da), + (0xa11005c0), + (0xc6d6f80f), + (0x98065fd5), + (0xffc0a21a), + (0x0000cdee), + (0x67c63021), + (0x391697fb), + (0x5ed06a34), + (0xa110c82e), + (0xc6d635e1), + (0x9806923b), + (0xffc06ff4)}, + {(0x00000000), + (0xc64d0800), + (0x67f58000), + (0xa1b88800), + (0x11ca8200), + (0xd7878a00), + (0x763f0200), + (0xb0720a00), + (0x4eb51e00), + (0x88f81600), + (0x29409e00), + (0xef0d9600), + (0x5f7f9c00), + (0x99329400), + (0x388a1c00), + (0xfec71400)}, + {(0x3f800000), + (0x3fe32684), + (0x3fb3fac0), + (0x3fd0dc44), + (0x3f88e541), + (0x3febc3c5), + (0x3fbb1f81), + (0x3fd83905), + (0x3fa75a8f), + (0x3fc47c0b), + (0x3f94a04f), + (0x3ff786cb), + (0x3fafbfce), + (0x3fcc994a), + (0x3f9c450e), + (0x3fff638a)}, + (0xfff80000), + {0x9e,0xa5,0x58,0x0a,0xe1,0x2c,0x68,0x94,0x2b,0x3f, + 0x4b,0xcd,0xe6,0xee,0x21,0x60,0xf3,0xf9,0x28,0xef,0x00} + }, + { + /* No.93 delta:2858 weight:713 */ + 11213, + 6, + 11, + 15, + {(0x00000000), + (0xce341fc2), + (0x74808815), + (0xbab497d7), + (0x396005de), + (0xf7541a1c), + (0x4de08dcb), + (0x83d49209), + (0x0000dee4), + (0xce34c126), + (0x748056f1), + (0xbab44933), + (0x3960db3a), + (0xf754c4f8), + (0x4de0532f), + (0x83d44ced)}, + {(0x00000000), + (0x0af25000), + (0x0c419c00), + (0x06b3cc00), + (0x82144400), + (0x88e61400), + (0x8e55d800), + (0x84a78800), + (0xe0053e00), + (0xeaf76e00), + (0xec44a200), + (0xe6b6f200), + (0x62117a00), + (0x68e32a00), + (0x6e50e600), + (0x64a2b600)}, + {(0x3f800000), + (0x3f857928), + (0x3f8620ce), + (0x3f8359e6), + (0x3fc10a22), + (0x3fc4730a), + (0x3fc72aec), + (0x3fc253c4), + (0x3ff0029f), + (0x3ff57bb7), + (0x3ff62251), + (0x3ff35b79), + (0x3fb108bd), + (0x3fb47195), + (0x3fb72873), + (0x3fb2515b)}, + (0xfff80000), + {0x67,0xfb,0x7b,0xdf,0x91,0xd7,0xa6,0xcb,0xfa,0x0c, + 0x9c,0xcf,0x32,0x59,0xd4,0x14,0x88,0x02,0x20,0xbd,0x00} + }, + { + /* No.94 delta:1428 weight:871 */ + 11213, + 51, + 24, + 3, + {(0x00000000), + (0x4055c0f5), + (0x3f9446d5), + (0x7fc18620), + (0xbd5005e5), + (0xfd05c510), + (0x82c44330), + (0xc29183c5), + (0x0000a8c2), + (0x40556837), + (0x3f94ee17), + (0x7fc12ee2), + (0xbd50ad27), + (0xfd056dd2), + 
(0x82c4ebf2), + (0xc2912b07)}, + {(0x00000000), + (0x6064a000), + (0x4ef89000), + (0x2e9c3000), + (0x08a1d000), + (0x68c57000), + (0x46594000), + (0x263de000), + (0x0c127e00), + (0x6c76de00), + (0x42eaee00), + (0x228e4e00), + (0x04b3ae00), + (0x64d70e00), + (0x4a4b3e00), + (0x2a2f9e00)}, + {(0x3f800000), + (0x3fb03250), + (0x3fa77c48), + (0x3f974e18), + (0x3f8450e8), + (0x3fb462b8), + (0x3fa32ca0), + (0x3f931ef0), + (0x3f86093f), + (0x3fb63b6f), + (0x3fa17577), + (0x3f914727), + (0x3f8259d7), + (0x3fb26b87), + (0x3fa5259f), + (0x3f9517cf)}, + (0xfff80000), + {0xfc,0xe1,0x89,0x6f,0x64,0xe4,0x4b,0x42,0xe3,0x27, + 0xa3,0x38,0x46,0x33,0x4f,0xf6,0x63,0x31,0xbb,0x80,0x00} + }, + { + /* No.95 delta:1914 weight:999 */ + 11213, + 43, + 8, + 13, + {(0x00000000), + (0x9c0723c5), + (0xc882db89), + (0x5485f84c), + (0x994005f3), + (0x05472636), + (0x51c2de7a), + (0xcdc5fdbf), + (0x000019bd), + (0x9c073a78), + (0xc882c234), + (0x5485e1f1), + (0x99401c4e), + (0x05473f8b), + (0x51c2c7c7), + (0xcdc5e402)}, + {(0x00000000), + (0x5061f000), + (0x105bc000), + (0x403a3000), + (0x00740400), + (0x5015f400), + (0x102fc400), + (0x404e3400), + (0x041c1e00), + (0x547dee00), + (0x1447de00), + (0x44262e00), + (0x04681a00), + (0x5409ea00), + (0x1433da00), + (0x44522a00)}, + {(0x3f800000), + (0x3fa830f8), + (0x3f882de0), + (0x3fa01d18), + (0x3f803a02), + (0x3fa80afa), + (0x3f8817e2), + (0x3fa0271a), + (0x3f820e0f), + (0x3faa3ef7), + (0x3f8a23ef), + (0x3fa21317), + (0x3f82340d), + (0x3faa04f5), + (0x3f8a19ed), + (0x3fa22915)}, + (0xfff80000), + {0xbf,0x5e,0xb4,0x48,0x47,0x8e,0xcb,0x36,0x4b,0x6a, + 0xc7,0x2c,0x1c,0xc9,0xdc,0x0d,0x4b,0x46,0x15,0xdf,0x00} + }, + { + /* No.96 delta:1078 weight:1383 */ + 11213, + 93, + 22, + 5, + {(0x00000000), + (0x4598c5d2), + (0x2a3c70f7), + (0x6fa4b525), + (0xf4e0060f), + (0xb178c3dd), + (0xdedc76f8), + (0x9b44b32a), + (0x0000ee71), + (0x45982ba3), + (0x2a3c9e86), + (0x6fa45b54), + (0xf4e0e87e), + (0xb1782dac), + (0xdedc9889), + (0x9b445d5b)}, + {(0x00000000), + (0x40220000), + (0x20738000), + (0x60518000), + (0x007e2000), + (0x405c2000), + (0x200da000), + (0x602fa000), + (0x80035e00), + (0xc0215e00), + (0xa070de00), + (0xe052de00), + (0x807d7e00), + (0xc05f7e00), + (0xa00efe00), + (0xe02cfe00)}, + {(0x3f800000), + (0x3fa01100), + (0x3f9039c0), + (0x3fb028c0), + (0x3f803f10), + (0x3fa02e10), + (0x3f9006d0), + (0x3fb017d0), + (0x3fc001af), + (0x3fe010af), + (0x3fd0386f), + (0x3ff0296f), + (0x3fc03ebf), + (0x3fe02fbf), + (0x3fd0077f), + (0x3ff0167f)}, + (0xfff80000), + {0xf7,0x01,0xdc,0x94,0xdb,0xfe,0x75,0xe3,0x41,0x0d, + 0xa7,0xcf,0xdb,0xc4,0xf8,0x63,0x39,0xf1,0x73,0x83,0x00} + }, + { + /* No.97 delta:1680 weight:855 */ + 11213, + 56, + 11, + 18, + {(0x00000000), + (0xed377fcb), + (0x935b7bcf), + (0x7e6c0404), + (0xb1b0061d), + (0x5c8779d6), + (0x22eb7dd2), + (0xcfdc0219), + (0x0000dc19), + (0xed37a3d2), + (0x935ba7d6), + (0x7e6cd81d), + (0xb1b0da04), + (0x5c87a5cf), + (0x22eba1cb), + (0xcfdcde00)}, + {(0x00000000), + (0x10100000), + (0x201c0000), + (0x300c0000), + (0x30000000), + (0x20100000), + (0x101c0000), + (0x000c0000), + (0x40051e00), + (0x50151e00), + (0x60191e00), + (0x70091e00), + (0x70051e00), + (0x60151e00), + (0x50191e00), + (0x40091e00)}, + {(0x3f800000), + (0x3f880800), + (0x3f900e00), + (0x3f980600), + (0x3f980000), + (0x3f900800), + (0x3f880e00), + (0x3f800600), + (0x3fa0028f), + (0x3fa80a8f), + (0x3fb00c8f), + (0x3fb8048f), + (0x3fb8028f), + (0x3fb00a8f), + (0x3fa80c8f), + (0x3fa0048f)}, + (0xfff80000), + {0xa7,0x90,0x32,0xf0,0xe5,0x54,0x81,0x86,0xda,0xa2, + 
0x04,0x5e,0xda,0xe5,0xcd,0x25,0x97,0x20,0x5f,0xd8,0x00} + }, + { + /* No.98 delta:4818 weight:1887 */ + 11213, + 73, + 1, + 4, + {(0x00000000), + (0xf3234424), + (0xe4236cf9), + (0x170028dd), + (0x62d00623), + (0x91f34207), + (0x86f36ada), + (0x75d02efe), + (0x00003707), + (0xf3237323), + (0xe4235bfe), + (0x17001fda), + (0x62d03124), + (0x91f37500), + (0x86f35ddd), + (0x75d019f9)}, + {(0x00000000), + (0x40550000), + (0x40300000), + (0x00650000), + (0x7a81c000), + (0x3ad4c000), + (0x3ab1c000), + (0x7ae4c000), + (0x54351e00), + (0x14601e00), + (0x14051e00), + (0x54501e00), + (0x2eb4de00), + (0x6ee1de00), + (0x6e84de00), + (0x2ed1de00)}, + {(0x3f800000), + (0x3fa02a80), + (0x3fa01800), + (0x3f803280), + (0x3fbd40e0), + (0x3f9d6a60), + (0x3f9d58e0), + (0x3fbd7260), + (0x3faa1a8f), + (0x3f8a300f), + (0x3f8a028f), + (0x3faa280f), + (0x3f975a6f), + (0x3fb770ef), + (0x3fb7426f), + (0x3f9768ef)}, + (0xfff80000), + {0x78,0x4c,0x48,0x62,0xcb,0x43,0x6e,0xae,0x79,0x3f, + 0xa2,0x90,0xc7,0xd5,0xf6,0x2c,0x3a,0x94,0xf6,0x88,0x00} + }, + { + /* No.99 delta:4941 weight:807 */ + 11213, + 35, + 6, + 17, + {(0x00000000), + (0xceb13d37), + (0x6cab25b0), + (0xa21a1887), + (0x2d100639), + (0xe3a13b0e), + (0x41bb2389), + (0x8f0a1ebe), + (0x00002bd0), + (0xceb116e7), + (0x6cab0e60), + (0xa21a3357), + (0x2d102de9), + (0xe3a110de), + (0x41bb0859), + (0x8f0a356e)}, + {(0x00000000), + (0x07201000), + (0x42300200), + (0x45101200), + (0x40000000), + (0x47201000), + (0x02300200), + (0x05101200), + (0x70001e00), + (0x77200e00), + (0x32301c00), + (0x35100c00), + (0x30001e00), + (0x37200e00), + (0x72301c00), + (0x75100c00)}, + {(0x3f800000), + (0x3f839008), + (0x3fa11801), + (0x3fa28809), + (0x3fa00000), + (0x3fa39008), + (0x3f811801), + (0x3f828809), + (0x3fb8000f), + (0x3fbb9007), + (0x3f99180e), + (0x3f9a8806), + (0x3f98000f), + (0x3f9b9007), + (0x3fb9180e), + (0x3fba8806)}, + (0xfff80000), + {0x47,0x35,0x44,0x50,0x63,0x4a,0x6a,0xcd,0x53,0x03, + 0xdd,0x48,0x13,0x12,0x80,0xdf,0x0e,0x77,0xfa,0xf3,0x00} + }, + { + /* No.100 delta:1308 weight:863 */ + 11213, + 86, + 13, + 14, + {(0x00000000), + (0xc5ba19cb), + (0x67d04c0f), + (0xa26a55c4), + (0x36300642), + (0xf38a1f89), + (0x51e04a4d), + (0x945a5386), + (0x0000f011), + (0xc5bae9da), + (0x67d0bc1e), + (0xa26aa5d5), + (0x3630f653), + (0xf38aef98), + (0x51e0ba5c), + (0x945aa397)}, + {(0x00000000), + (0x09233a00), + (0x40645e00), + (0x49476400), + (0x1030a000), + (0x19139a00), + (0x5054fe00), + (0x5977c400), + (0x30c05e00), + (0x39e36400), + (0x70a40000), + (0x79873a00), + (0x20f0fe00), + (0x29d3c400), + (0x6094a000), + (0x69b79a00)}, + {(0x3f800000), + (0x3f84919d), + (0x3fa0322f), + (0x3fa4a3b2), + (0x3f881850), + (0x3f8c89cd), + (0x3fa82a7f), + (0x3facbbe2), + (0x3f98602f), + (0x3f9cf1b2), + (0x3fb85200), + (0x3fbcc39d), + (0x3f90787f), + (0x3f94e9e2), + (0x3fb04a50), + (0x3fb4dbcd)}, + (0xfff80000), + {0xbe,0xe1,0xe6,0x46,0x8d,0x04,0x11,0x44,0x1a,0x37, + 0x82,0xd9,0x6f,0xd6,0x9e,0xd5,0x25,0x58,0x1e,0xae,0x00} + }, + { + /* No.101 delta:3747 weight:693 */ + 11213, + 93, + 7, + 18, + {(0x00000000), + (0x34b395ad), + (0x437cf2d0), + (0x77cf677d), + (0xa770065c), + (0x93c393f1), + (0xe40cf48c), + (0xd0bf6121), + (0x0000f47f), + (0x34b361d2), + (0x437c06af), + (0x77cf9302), + (0xa770f223), + (0x93c3678e), + (0xe40c00f3), + (0xd0bf955e)}, + {(0x00000000), + (0x40700000), + (0x00580000), + (0x40280000), + (0x40680000), + (0x00180000), + (0x40300000), + (0x00400000), + (0x21601e00), + (0x61101e00), + (0x21381e00), + (0x61481e00), + (0x61081e00), + (0x21781e00), + (0x61501e00), + 
(0x21201e00)}, + {(0x3f800000), + (0x3fa03800), + (0x3f802c00), + (0x3fa01400), + (0x3fa03400), + (0x3f800c00), + (0x3fa01800), + (0x3f802000), + (0x3f90b00f), + (0x3fb0880f), + (0x3f909c0f), + (0x3fb0a40f), + (0x3fb0840f), + (0x3f90bc0f), + (0x3fb0a80f), + (0x3f90900f)}, + (0xfff80000), + {0xe2,0xf1,0x5a,0xc6,0xba,0x8b,0xae,0x08,0x43,0xcc, + 0x54,0xa6,0xe9,0x8f,0x81,0x0a,0x85,0xe3,0x5e,0xef,0x00} + }, + { + /* No.102 delta:3462 weight:1273 */ + 11213, + 55, + 23, + 8, + {(0x00000000), + (0xc3cc98c8), + (0xca3b10a5), + (0x09f7886d), + (0x18300660), + (0xdbfc9ea8), + (0xd20b16c5), + (0x11c78e0d), + (0x000030c4), + (0xc3cca80c), + (0xca3b2061), + (0x09f7b8a9), + (0x183036a4), + (0xdbfcae6c), + (0xd20b2601), + (0x11c7bec9)}, + {(0x00000000), + (0x08000000), + (0x00800000), + (0x08800000), + (0x88200000), + (0x80200000), + (0x88a00000), + (0x80a00000), + (0x03201e00), + (0x0b201e00), + (0x03a01e00), + (0x0ba01e00), + (0x8b001e00), + (0x83001e00), + (0x8b801e00), + (0x83801e00)}, + {(0x3f800000), + (0x3f840000), + (0x3f804000), + (0x3f844000), + (0x3fc41000), + (0x3fc01000), + (0x3fc45000), + (0x3fc05000), + (0x3f81900f), + (0x3f85900f), + (0x3f81d00f), + (0x3f85d00f), + (0x3fc5800f), + (0x3fc1800f), + (0x3fc5c00f), + (0x3fc1c00f)}, + (0xfff80000), + {0x43,0x1f,0x6f,0x01,0x80,0xf4,0x2e,0x31,0xcd,0xc1, + 0xb9,0x2f,0x6b,0x5c,0x0e,0x0b,0x35,0xa5,0x70,0x7e,0x00} + }, + { + /* No.103 delta:5054 weight:1635 */ + 11213, + 64, + 1, + 2, + {(0x00000000), + (0x4f30581d), + (0x5542c11f), + (0x1a729902), + (0x9da00670), + (0xd2905e6d), + (0xc8e2c76f), + (0x87d29f72), + (0x00007e47), + (0x4f30265a), + (0x5542bf58), + (0x1a72e745), + (0x9da07837), + (0xd290202a), + (0xc8e2b928), + (0x87d2e135)}, + {(0x00000000), + (0x0be21000), + (0x55018000), + (0x5ee39000), + (0x83944800), + (0x88765800), + (0xd695c800), + (0xdd77d800), + (0x00669e00), + (0x0b848e00), + (0x55671e00), + (0x5e850e00), + (0x83f2d600), + (0x8810c600), + (0xd6f35600), + (0xdd114600)}, + {(0x3f800000), + (0x3f85f108), + (0x3faa80c0), + (0x3faf71c8), + (0x3fc1ca24), + (0x3fc43b2c), + (0x3feb4ae4), + (0x3feebbec), + (0x3f80334f), + (0x3f85c247), + (0x3faab38f), + (0x3faf4287), + (0x3fc1f96b), + (0x3fc40863), + (0x3feb79ab), + (0x3fee88a3)}, + (0xfff80000), + {0x44,0x10,0x9f,0x8c,0xd9,0xbc,0x8e,0xb6,0xd5,0x14, + 0x72,0x32,0xdd,0xf2,0x3c,0xea,0xb3,0x38,0xb9,0x31,0x00} + }, + { + /* No.104 delta:2496 weight:907 */ + 11213, + 79, + 15, + 14, + {(0x00000000), + (0x5c0202d2), + (0x1c3f4858), + (0x403d4a8a), + (0xd2f0068f), + (0x8ef2045d), + (0xcecf4ed7), + (0x92cd4c05), + (0x0000d76d), + (0x5c02d5bf), + (0x1c3f9f35), + (0x403d9de7), + (0xd2f0d1e2), + (0x8ef2d330), + (0xcecf99ba), + (0x92cd9b68)}, + {(0x00000000), + (0x6b8d0400), + (0x1053a000), + (0x7bdea400), + (0x19807000), + (0x720d7400), + (0x09d3d000), + (0x625ed400), + (0x08441e00), + (0x63c91a00), + (0x1817be00), + (0x739aba00), + (0x11c46e00), + (0x7a496a00), + (0x0197ce00), + (0x6a1aca00)}, + {(0x3f800000), + (0x3fb5c682), + (0x3f8829d0), + (0x3fbdef52), + (0x3f8cc038), + (0x3fb906ba), + (0x3f84e9e8), + (0x3fb12f6a), + (0x3f84220f), + (0x3fb1e48d), + (0x3f8c0bdf), + (0x3fb9cd5d), + (0x3f88e237), + (0x3fbd24b5), + (0x3f80cbe7), + (0x3fb50d65)}, + (0xfff80000), + {0xb7,0xe4,0x3c,0x74,0x28,0xb1,0x2f,0xa4,0x5c,0x37, + 0xb1,0x69,0xd9,0x95,0x83,0xd9,0xa6,0x36,0x5b,0x12,0x00} + }, + { + /* No.105 delta:1174 weight:1019 */ + 11213, + 91, + 17, + 14, + {(0x00000000), + (0xa83f2177), + (0xa0b43c75), + (0x088b1d02), + (0x43300696), + (0xeb0f27e1), + (0xe3843ae3), + (0x4bbb1b94), + (0x00007f34), + 
(0xa83f5e43), + (0xa0b44341), + (0x088b6236), + (0x433079a2), + (0xeb0f58d5), + (0xe38445d7), + (0x4bbb64a0)}, + {(0x00000000), + (0x0115c000), + (0x90090000), + (0x911cc000), + (0x10422000), + (0x1157e000), + (0x804b2000), + (0x815ee000), + (0x10393e00), + (0x112cfe00), + (0x80303e00), + (0x8125fe00), + (0x007b1e00), + (0x016ede00), + (0x90721e00), + (0x9167de00)}, + {(0x3f800000), + (0x3f808ae0), + (0x3fc80480), + (0x3fc88e60), + (0x3f882110), + (0x3f88abf0), + (0x3fc02590), + (0x3fc0af70), + (0x3f881c9f), + (0x3f88967f), + (0x3fc0181f), + (0x3fc092ff), + (0x3f803d8f), + (0x3f80b76f), + (0x3fc8390f), + (0x3fc8b3ef)}, + (0xfff80000), + {0xe2,0x4f,0x2f,0xcb,0x05,0x76,0xce,0x0b,0xc7,0x7b, + 0xcc,0x65,0x06,0xce,0x2c,0x0c,0x3c,0xb6,0x2c,0x79,0x00} + }, + { + /* No.106 delta:1564 weight:925 */ + 11213, + 40, + 9, + 1, + {(0x00000000), + (0xd288f412), + (0x0b33c7cb), + (0xd9bb33d9), + (0xe0d006a8), + (0x3258f2ba), + (0xebe3c163), + (0x396b3571), + (0x00004470), + (0xd288b062), + (0x0b3383bb), + (0xd9bb77a9), + (0xe0d042d8), + (0x3258b6ca), + (0xebe38513), + (0x396b7101)}, + {(0x00000000), + (0x46794000), + (0x004d0000), + (0x46344000), + (0x007f0200), + (0x46064200), + (0x00320200), + (0x464b4200), + (0x00235e00), + (0x465a1e00), + (0x006e5e00), + (0x46171e00), + (0x005c5c00), + (0x46251c00), + (0x00115c00), + (0x46681c00)}, + {(0x3f800000), + (0x3fa33ca0), + (0x3f802680), + (0x3fa31a20), + (0x3f803f81), + (0x3fa30321), + (0x3f801901), + (0x3fa325a1), + (0x3f8011af), + (0x3fa32d0f), + (0x3f80372f), + (0x3fa30b8f), + (0x3f802e2e), + (0x3fa3128e), + (0x3f8008ae), + (0x3fa3340e)}, + (0xfff80000), + {0xc1,0x6e,0x8a,0x33,0x35,0x9f,0x07,0x03,0x76,0x11, + 0xe5,0x21,0xc3,0xdb,0x18,0x40,0x05,0xa0,0xd6,0x4c,0x00} + }, + { + /* No.107 delta:3386 weight:1851 */ + 11213, + 48, + 2, + 6, + {(0x00000000), + (0x5199ee9c), + (0x0606c085), + (0x579f2e19), + (0xe08006b0), + (0xb119e82c), + (0xe686c635), + (0xb71f28a9), + (0x000086be), + (0x51996822), + (0x0606463b), + (0x579fa8a7), + (0xe080800e), + (0xb1196e92), + (0xe686408b), + (0xb71fae17)}, + {(0x00000000), + (0x08250000), + (0x0a400800), + (0x02650800), + (0x10040000), + (0x18210000), + (0x1a440800), + (0x12610800), + (0x01081e00), + (0x092d1e00), + (0x0b481600), + (0x036d1600), + (0x110c1e00), + (0x19291e00), + (0x1b4c1600), + (0x13691600)}, + {(0x3f800000), + (0x3f841280), + (0x3f852004), + (0x3f813284), + (0x3f880200), + (0x3f8c1080), + (0x3f8d2204), + (0x3f893084), + (0x3f80840f), + (0x3f84968f), + (0x3f85a40b), + (0x3f81b68b), + (0x3f88860f), + (0x3f8c948f), + (0x3f8da60b), + (0x3f89b48b)}, + (0xfff80000), + {0x29,0x35,0xcb,0x9b,0x48,0x71,0x31,0x12,0x0b,0x2e, + 0xd3,0x98,0x06,0x50,0x05,0x3b,0xac,0x89,0x1a,0xaf,0x00} + }, + { + /* No.108 delta:2469 weight:1251 */ + 11213, + 31, + 7, + 1, + {(0x00000000), + (0x4efca9a0), + (0x3a920750), + (0x746eaef0), + (0x958006ce), + (0xdb7caf6e), + (0xaf12019e), + (0xe1eea83e), + (0x0000ee3c), + (0x4efc479c), + (0x3a92e96c), + (0x746e40cc), + (0x9580e8f2), + (0xdb7c4152), + (0xaf12efa2), + (0xe1ee4602)}, + {(0x00000000), + (0x405c0000), + (0x004ba200), + (0x4017a200), + (0x00600000), + (0x403c0000), + (0x002ba200), + (0x4077a200), + (0x80481e00), + (0xc0141e00), + (0x8003bc00), + (0xc05fbc00), + (0x80281e00), + (0xc0741e00), + (0x8063bc00), + (0xc03fbc00)}, + {(0x3f800000), + (0x3fa02e00), + (0x3f8025d1), + (0x3fa00bd1), + (0x3f803000), + (0x3fa01e00), + (0x3f8015d1), + (0x3fa03bd1), + (0x3fc0240f), + (0x3fe00a0f), + (0x3fc001de), + (0x3fe02fde), + (0x3fc0140f), + (0x3fe03a0f), + (0x3fc031de), + (0x3fe01fde)}, + 
(0xfff80000), + {0x05,0x56,0x79,0x8d,0xa1,0x79,0x94,0x9b,0xfd,0xfa, + 0xd1,0xd9,0x42,0x04,0x4f,0x21,0x8e,0xdb,0xe7,0xa3,0x00} + }, + { + /* No.109 delta:997 weight:681 */ + 11213, + 71, + 19, + 5, + {(0x00000000), + (0x21e405bb), + (0x4f97981d), + (0x6e739da6), + (0x01d006d7), + (0x2034036c), + (0x4e479eca), + (0x6fa39b71), + (0x0000e6b0), + (0x21e4e30b), + (0x4f977ead), + (0x6e737b16), + (0x01d0e067), + (0x2034e5dc), + (0x4e47787a), + (0x6fa37dc1)}, + {(0x00000000), + (0x40267000), + (0x0682d000), + (0x46a4a000), + (0x11832000), + (0x51a55000), + (0x1701f000), + (0x57278000), + (0x40581e00), + (0x007e6e00), + (0x46dace00), + (0x06fcbe00), + (0x51db3e00), + (0x11fd4e00), + (0x5759ee00), + (0x177f9e00)}, + {(0x3f800000), + (0x3fa01338), + (0x3f834168), + (0x3fa35250), + (0x3f88c190), + (0x3fa8d2a8), + (0x3f8b80f8), + (0x3fab93c0), + (0x3fa02c0f), + (0x3f803f37), + (0x3fa36d67), + (0x3f837e5f), + (0x3fa8ed9f), + (0x3f88fea7), + (0x3fabacf7), + (0x3f8bbfcf)}, + (0xfff80000), + {0x33,0x76,0x14,0x81,0x56,0x54,0xf0,0x4f,0x12,0x90, + 0xf4,0x03,0x95,0x9c,0x1a,0x38,0x51,0x80,0x32,0xaa,0x00} + }, + { + /* No.110 delta:2599 weight:829 */ + 11213, + 8, + 11, + 14, + {(0x00000000), + (0x4654c25e), + (0xa8eaa1d6), + (0xeebe6388), + (0x52a006e9), + (0x14f4c4b7), + (0xfa4aa73f), + (0xbc1e6561), + (0x000019ff), + (0x4654dba1), + (0xa8eab829), + (0xeebe7a77), + (0x52a01f16), + (0x14f4dd48), + (0xfa4abec0), + (0xbc1e7c9e)}, + {(0x00000000), + (0xfe272000), + (0x8207f000), + (0x7c20d000), + (0x4e344800), + (0xb0136800), + (0xcc33b800), + (0x32149800), + (0x01611e00), + (0xff463e00), + (0x8366ee00), + (0x7d41ce00), + (0x4f555600), + (0xb1727600), + (0xcd52a600), + (0x33758600)}, + {(0x3f800000), + (0x3fff1390), + (0x3fc103f8), + (0x3fbe1068), + (0x3fa71a24), + (0x3fd809b4), + (0x3fe619dc), + (0x3f990a4c), + (0x3f80b08f), + (0x3fffa31f), + (0x3fc1b377), + (0x3fbea0e7), + (0x3fa7aaab), + (0x3fd8b93b), + (0x3fe6a953), + (0x3f99bac3)}, + (0xfff80000), + {0xe0,0x6d,0x93,0xcf,0x6d,0x0f,0xa4,0xf7,0x93,0x3e, + 0x67,0xb4,0x20,0xb8,0x51,0x3a,0xe3,0xcd,0xf5,0xf9,0x00} + }, + { + /* No.111 delta:2048 weight:1649 */ + 11213, + 12, + 8, + 4, + {(0x00000000), + (0xbf40afc5), + (0xdef9c9b1), + (0x61b96674), + (0xfee006f8), + (0x41a0a93d), + (0x2019cf49), + (0x9f59608c), + (0x000056c5), + (0xbf40f900), + (0xdef99f74), + (0x61b930b1), + (0xfee0503d), + (0x41a0fff8), + (0x2019998c), + (0x9f593649)}, + {(0x00000000), + (0x2fa10c00), + (0x020b0000), + (0x2daa0c00), + (0x020ae000), + (0x2dabec00), + (0x0001e000), + (0x2fa0ec00), + (0x28021e00), + (0x07a31200), + (0x2a091e00), + (0x05a81200), + (0x2a08fe00), + (0x05a9f200), + (0x2803fe00), + (0x07a2f200)}, + {(0x3f800000), + (0x3f97d086), + (0x3f810580), + (0x3f96d506), + (0x3f810570), + (0x3f96d5f6), + (0x3f8000f0), + (0x3f97d076), + (0x3f94010f), + (0x3f83d189), + (0x3f95048f), + (0x3f82d409), + (0x3f95047f), + (0x3f82d4f9), + (0x3f9401ff), + (0x3f83d179)}, + (0xfff80000), + {0x4a,0x15,0xe8,0xf1,0x59,0x83,0x8e,0x52,0x8a,0x24, + 0xa4,0xb5,0x5d,0xfb,0x87,0xe0,0x22,0xe3,0x18,0x37,0x00} + }, + { + /* No.112 delta:2152 weight:1369 */ + 11213, + 81, + 6, + 10, + {(0x00000000), + (0xfbea0aeb), + (0xba6ad14d), + (0x4180dba6), + (0x21e00705), + (0xda0a0dee), + (0x9b8ad648), + (0x6060dca3), + (0x00003510), + (0xfbea3ffb), + (0xba6ae45d), + (0x4180eeb6), + (0x21e03215), + (0xda0a38fe), + (0x9b8ae358), + (0x6060e9b3)}, + {(0x00000000), + (0x01428000), + (0x04604000), + (0x0522c000), + (0x00081000), + (0x014a9000), + (0x04685000), + (0x052ad000), + (0x20261e00), + (0x21649e00), + (0x24465e00), 
+ (0x2504de00), + (0x202e0e00), + (0x216c8e00), + (0x244e4e00), + (0x250cce00)}, + {(0x3f800000), + (0x3f80a140), + (0x3f823020), + (0x3f829160), + (0x3f800408), + (0x3f80a548), + (0x3f823428), + (0x3f829568), + (0x3f90130f), + (0x3f90b24f), + (0x3f92232f), + (0x3f92826f), + (0x3f901707), + (0x3f90b647), + (0x3f922727), + (0x3f928667)}, + (0xfff80000), + {0xfd,0x72,0xaa,0xc7,0x20,0x44,0x28,0xe5,0x03,0x3e, + 0xf9,0x6d,0xad,0x9f,0x51,0x0c,0x87,0xf6,0x43,0x0f,0x00} + }, + { + /* No.113 delta:1871 weight:1215 */ + 11213, + 61, + 14, + 10, + {(0x00000000), + (0x11856d8d), + (0xaac84415), + (0xbb4d2998), + (0xae00071a), + (0xbf856a97), + (0x04c8430f), + (0x154d2e82), + (0x0000cc65), + (0x1185a1e8), + (0xaac88870), + (0xbb4de5fd), + (0xae00cb7f), + (0xbf85a6f2), + (0x04c88f6a), + (0x154de2e7)}, + {(0x00000000), + (0x006d0000), + (0x00b10000), + (0x00dc0000), + (0x04490000), + (0x04240000), + (0x04f80000), + (0x04950000), + (0x00421e00), + (0x002f1e00), + (0x00f31e00), + (0x009e1e00), + (0x040b1e00), + (0x04661e00), + (0x04ba1e00), + (0x04d71e00)}, + {(0x3f800000), + (0x3f803680), + (0x3f805880), + (0x3f806e00), + (0x3f822480), + (0x3f821200), + (0x3f827c00), + (0x3f824a80), + (0x3f80210f), + (0x3f80178f), + (0x3f80798f), + (0x3f804f0f), + (0x3f82058f), + (0x3f82330f), + (0x3f825d0f), + (0x3f826b8f)}, + (0xfff80000), + {0x98,0xdb,0x7c,0xec,0x90,0x27,0x75,0x69,0x6c,0x3d, + 0x4c,0xec,0xa2,0x8e,0xab,0xef,0xd1,0x0c,0xbd,0x8c,0x00} + }, + { + /* No.114 delta:1950 weight:1011 */ + 11213, + 20, + 16, + 13, + {(0x00000000), + (0xbdc99be3), + (0xaafee229), + (0x173779ca), + (0x7300072e), + (0xcec99ccd), + (0xd9fee507), + (0x64377ee4), + (0x0000b504), + (0xbdc92ee7), + (0xaafe572d), + (0x1737ccce), + (0x7300b22a), + (0xcec929c9), + (0xd9fe5003), + (0x6437cbe0)}, + {(0x00000000), + (0x016ec000), + (0x8ce40000), + (0x8d8ac000), + (0x1c548000), + (0x1d3a4000), + (0x90b08000), + (0x91de4000), + (0x00869e00), + (0x01e85e00), + (0x8c629e00), + (0x8d0c5e00), + (0x1cd21e00), + (0x1dbcde00), + (0x90361e00), + (0x9158de00)}, + {(0x3f800000), + (0x3f80b760), + (0x3fc67200), + (0x3fc6c560), + (0x3f8e2a40), + (0x3f8e9d20), + (0x3fc85840), + (0x3fc8ef20), + (0x3f80434f), + (0x3f80f42f), + (0x3fc6314f), + (0x3fc6862f), + (0x3f8e690f), + (0x3f8ede6f), + (0x3fc81b0f), + (0x3fc8ac6f)}, + (0xfff80000), + {0xa7,0x22,0x09,0x2a,0x46,0xe2,0xa3,0x13,0x26,0x8f, + 0xa5,0xc6,0x28,0x12,0x57,0xb4,0x27,0x86,0xf1,0x1d,0x00} + }, + { + /* No.115 delta:1732 weight:1445 */ + 11213, + 29, + 17, + 7, + {(0x00000000), + (0xdb25076c), + (0x9bf7e9d5), + (0x40d2eeb9), + (0x95f00737), + (0x4ed5005b), + (0x0e07eee2), + (0xd522e98e), + (0x0000f5f0), + (0xdb25f29c), + (0x9bf71c25), + (0x40d21b49), + (0x95f0f2c7), + (0x4ed5f5ab), + (0x0e071b12), + (0xd5221c7e)}, + {(0x00000000), + (0x481e0000), + (0x80040000), + (0xc81a0000), + (0x007d0000), + (0x48630000), + (0x80790000), + (0xc8670000), + (0x40121e00), + (0x080c1e00), + (0xc0161e00), + (0x88081e00), + (0x406f1e00), + (0x08711e00), + (0xc06b1e00), + (0x88751e00)}, + {(0x3f800000), + (0x3fa40f00), + (0x3fc00200), + (0x3fe40d00), + (0x3f803e80), + (0x3fa43180), + (0x3fc03c80), + (0x3fe43380), + (0x3fa0090f), + (0x3f84060f), + (0x3fe00b0f), + (0x3fc4040f), + (0x3fa0378f), + (0x3f84388f), + (0x3fe0358f), + (0x3fc43a8f)}, + (0xfff80000), + {0xc2,0xbe,0xe6,0x5f,0xf5,0x28,0x34,0x1c,0x5e,0x2d, + 0xdb,0x99,0x36,0x5d,0x04,0x52,0x9e,0x76,0x9e,0xd5,0x00} + }, + { + /* No.116 delta:1707 weight:1245 */ + 11213, + 30, + 22, + 3, + {(0x00000000), + (0x0a5f3de8), + (0xcb7a16d6), + (0xc1252b3e), + (0xfcd0074b), + 
(0xf68f3aa3), + (0x37aa119d), + (0x3df52c75), + (0x00003bff), + (0x0a5f0617), + (0xcb7a2d29), + (0xc12510c1), + (0xfcd03cb4), + (0xf68f015c), + (0x37aa2a62), + (0x3df5178a)}, + {(0x00000000), + (0x402e8e00), + (0x0073d000), + (0x405d5e00), + (0x206d4000), + (0x6043ce00), + (0x201e9000), + (0x60301e00), + (0xd21dfe00), + (0x92337000), + (0xd26e2e00), + (0x9240a000), + (0xf270be00), + (0xb25e3000), + (0xf2036e00), + (0xb22de000)}, + {(0x3f800000), + (0x3fa01747), + (0x3f8039e8), + (0x3fa02eaf), + (0x3f9036a0), + (0x3fb021e7), + (0x3f900f48), + (0x3fb0180f), + (0x3fe90eff), + (0x3fc919b8), + (0x3fe93717), + (0x3fc92050), + (0x3ff9385f), + (0x3fd92f18), + (0x3ff901b7), + (0x3fd916f0)}, + (0xfff80000), + {0x30,0x04,0x85,0xba,0x89,0x2d,0x19,0x39,0x57,0x1c, + 0x54,0xa8,0xb5,0x0a,0x05,0x27,0x14,0x6a,0xed,0xe0,0x00} + }, + { + /* No.117 delta:2917 weight:1743 */ + 11213, + 37, + 3, + 6, + {(0x00000000), + (0x501b23d2), + (0x2770f82a), + (0x776bdbf8), + (0x67300751), + (0x372b2483), + (0x4040ff7b), + (0x105bdca9), + (0x000038d9), + (0x501b1b0b), + (0x2770c0f3), + (0x776be321), + (0x67303f88), + (0x372b1c5a), + (0x4040c7a2), + (0x105be470)}, + {(0x00000000), + (0x20542200), + (0x40387800), + (0x606c5a00), + (0x14402000), + (0x34140200), + (0x54785800), + (0x742c7a00), + (0x1840de00), + (0x3814fc00), + (0x5878a600), + (0x782c8400), + (0x0c00fe00), + (0x2c54dc00), + (0x4c388600), + (0x6c6ca400)}, + {(0x3f800000), + (0x3f902a11), + (0x3fa01c3c), + (0x3fb0362d), + (0x3f8a2010), + (0x3f9a0a01), + (0x3faa3c2c), + (0x3fba163d), + (0x3f8c206f), + (0x3f9c0a7e), + (0x3fac3c53), + (0x3fbc1642), + (0x3f86007f), + (0x3f962a6e), + (0x3fa61c43), + (0x3fb63652)}, + (0xfff80000), + {0x86,0x60,0xf6,0x24,0x0e,0xff,0xd7,0xe9,0x32,0x8f, + 0xa0,0x96,0x77,0x67,0x0e,0xec,0x2e,0x00,0x3b,0x13,0x00} + }, + { + /* No.118 delta:983 weight:1587 */ + 11213, + 62, + 19, + 4, + {(0x00000000), + (0x08eb784a), + (0x736759ff), + (0x7b8c21b5), + (0x0ca0076b), + (0x044b7f21), + (0x7fc75e94), + (0x772c26de), + (0x0000d939), + (0x08eba173), + (0x736780c6), + (0x7b8cf88c), + (0x0ca0de52), + (0x044ba618), + (0x7fc787ad), + (0x772cffe7)}, + {(0x00000000), + (0x041eba00), + (0x20637000), + (0x247dca00), + (0x20110000), + (0x240fba00), + (0x00727000), + (0x046cca00), + (0x10001e00), + (0x141ea400), + (0x30636e00), + (0x347dd400), + (0x30111e00), + (0x340fa400), + (0x10726e00), + (0x146cd400)}, + {(0x3f800000), + (0x3f820f5d), + (0x3f9031b8), + (0x3f923ee5), + (0x3f900880), + (0x3f9207dd), + (0x3f803938), + (0x3f823665), + (0x3f88000f), + (0x3f8a0f52), + (0x3f9831b7), + (0x3f9a3eea), + (0x3f98088f), + (0x3f9a07d2), + (0x3f883937), + (0x3f8a366a)}, + (0xfff80000), + {0x70,0x7f,0x78,0xb3,0x65,0x02,0xe0,0xb9,0x55,0x49, + 0xd9,0x60,0xa8,0xa5,0xd3,0x10,0x3b,0x08,0x55,0xfd,0x00} + }, + { + /* No.119 delta:1397 weight:1065 */ + 11213, + 57, + 14, + 11, + {(0x00000000), + (0x5e421ddc), + (0x638787c4), + (0x3dc59a18), + (0x8e00077d), + (0xd0421aa1), + (0xed8780b9), + (0xb3c59d65), + (0x0000840f), + (0x5e4299d3), + (0x638703cb), + (0x3dc51e17), + (0x8e008372), + (0xd0429eae), + (0xed8704b6), + (0xb3c5196a)}, + {(0x00000000), + (0x08e5ec00), + (0x245ea400), + (0x2cbb4800), + (0x00688000), + (0x088d6c00), + (0x24362400), + (0x2cd3c800), + (0x00111e00), + (0x08f4f200), + (0x244fba00), + (0x2caa5600), + (0x00799e00), + (0x089c7200), + (0x24273a00), + (0x2cc2d600)}, + {(0x3f800000), + (0x3f8472f6), + (0x3f922f52), + (0x3f965da4), + (0x3f803440), + (0x3f8446b6), + (0x3f921b12), + (0x3f9669e4), + (0x3f80088f), + (0x3f847a79), + (0x3f9227dd), + (0x3f96552b), + 
(0x3f803ccf), + (0x3f844e39), + (0x3f92139d), + (0x3f96616b)}, + (0xfff80000), + {0x72,0xeb,0x0e,0x84,0xe7,0xef,0x0c,0xb3,0x72,0xaf, + 0x80,0x06,0xbc,0xf9,0x33,0xf7,0x21,0x72,0x27,0x28,0x00} + }, + { + /* No.120 delta:1392 weight:827 */ + 11213, + 46, + 15, + 17, + {(0x00000000), + (0xa557bf77), + (0x097f4c89), + (0xac28f3fe), + (0xffa0078b), + (0x5af7b8fc), + (0xf6df4b02), + (0x5388f475), + (0x00006249), + (0xa557dd3e), + (0x097f2ec0), + (0xac2891b7), + (0xffa065c2), + (0x5af7dab5), + (0xf6df294b), + (0x5388963c)}, + {(0x00000000), + (0x68748400), + (0x4200ae00), + (0x2a742a00), + (0x1c086000), + (0x747ce400), + (0x5e08ce00), + (0x367c4a00), + (0x0046fe00), + (0x68327a00), + (0x42465000), + (0x2a32d400), + (0x1c4e9e00), + (0x743a1a00), + (0x5e4e3000), + (0x363ab400)}, + {(0x3f800000), + (0x3fb43a42), + (0x3fa10057), + (0x3f953a15), + (0x3f8e0430), + (0x3fba3e72), + (0x3faf0467), + (0x3f9b3e25), + (0x3f80237f), + (0x3fb4193d), + (0x3fa12328), + (0x3f95196a), + (0x3f8e274f), + (0x3fba1d0d), + (0x3faf2718), + (0x3f9b1d5a)}, + (0xfff80000), + {0x7c,0xb2,0x6a,0xff,0x78,0xfa,0xd6,0x78,0xf9,0xba, + 0x77,0x1d,0x84,0xb3,0xbb,0xcb,0xac,0xdf,0x51,0x54,0x00} + }, + { + /* No.121 delta:1663 weight:775 */ + 11213, + 82, + 9, + 17, + {(0x00000000), + (0x1fe0e7a6), + (0x67406a7f), + (0x78a08dd9), + (0xda30079d), + (0xc5d0e03b), + (0xbd706de2), + (0xa2908a44), + (0x000076e4), + (0x1fe09142), + (0x67401c9b), + (0x78a0fb3d), + (0xda307179), + (0xc5d096df), + (0xbd701b06), + (0xa290fca0)}, + {(0x00000000), + (0x100e1000), + (0x20734000), + (0x307d5000), + (0x5008e200), + (0x4006f200), + (0x707ba200), + (0x6075b200), + (0x3242de00), + (0x224cce00), + (0x12319e00), + (0x023f8e00), + (0x624a3c00), + (0x72442c00), + (0x42397c00), + (0x52376c00)}, + {(0x3f800000), + (0x3f880708), + (0x3f9039a0), + (0x3f983ea8), + (0x3fa80471), + (0x3fa00379), + (0x3fb83dd1), + (0x3fb03ad9), + (0x3f99216f), + (0x3f912667), + (0x3f8918cf), + (0x3f811fc7), + (0x3fb1251e), + (0x3fb92216), + (0x3fa11cbe), + (0x3fa91bb6)}, + (0xfff80000), + {0xa3,0xd8,0x55,0xcd,0x98,0xdc,0x68,0x95,0x68,0xdf, + 0xcf,0x56,0x03,0x2a,0x94,0x75,0x10,0xec,0x0a,0x47,0x00} + }, + { + /* No.122 delta:1328 weight:751 */ + 11213, + 67, + 14, + 18, + {(0x00000000), + (0x76bc3aa8), + (0xc37bdc20), + (0xb5c7e688), + (0x38b007ab), + (0x4e0c3d03), + (0xfbcbdb8b), + (0x8d77e123), + (0x0000bd02), + (0x76bc87aa), + (0xc37b6122), + (0xb5c75b8a), + (0x38b0baa9), + (0x4e0c8001), + (0xfbcb6689), + (0x8d775c21)}, + {(0x00000000), + (0x1112c200), + (0x18038000), + (0x09114200), + (0x00065000), + (0x11149200), + (0x1805d000), + (0x09171200), + (0x6001de00), + (0x71131c00), + (0x78025e00), + (0x69109c00), + (0x60078e00), + (0x71154c00), + (0x78040e00), + (0x6916cc00)}, + {(0x3f800000), + (0x3f888961), + (0x3f8c01c0), + (0x3f8488a1), + (0x3f800328), + (0x3f888a49), + (0x3f8c02e8), + (0x3f848b89), + (0x3fb000ef), + (0x3fb8898e), + (0x3fbc012f), + (0x3fb4884e), + (0x3fb003c7), + (0x3fb88aa6), + (0x3fbc0207), + (0x3fb48b66)}, + (0xfff80000), + {0x18,0x30,0x32,0x35,0xe2,0xb4,0xcd,0x94,0x5e,0x6b, + 0xe6,0x78,0x0b,0x92,0x24,0xbf,0x7c,0xc5,0xbd,0xe2,0x00} + }, + { + /* No.123 delta:1657 weight:573 */ + 11213, + 70, + 8, + 19, + {(0x00000000), + (0xfd59bd60), + (0xe0041fb5), + (0x1d5da2d5), + (0x836007b6), + (0x7e39bad6), + (0x63641803), + (0x9e3da563), + (0x0000b1f6), + (0xfd590c96), + (0xe004ae43), + (0x1d5d1323), + (0x8360b640), + (0x7e390b20), + (0x6364a9f5), + (0x9e3d1495)}, + {(0x00000000), + (0x57b14800), + (0x5868a400), + (0x0fd9ec00), + (0x084c7000), + (0x5ffd3800), + 
(0x5024d400), + (0x07959c00), + (0x10121e00), + (0x47a35600), + (0x487aba00), + (0x1fcbf200), + (0x185e6e00), + (0x4fef2600), + (0x4036ca00), + (0x17878200)}, + {(0x3f800000), + (0x3fabd8a4), + (0x3fac3452), + (0x3f87ecf6), + (0x3f842638), + (0x3faffe9c), + (0x3fa8126a), + (0x3f83cace), + (0x3f88090f), + (0x3fa3d1ab), + (0x3fa43d5d), + (0x3f8fe5f9), + (0x3f8c2f37), + (0x3fa7f793), + (0x3fa01b65), + (0x3f8bc3c1)}, + (0xfff80000), + {0x0b,0xcb,0xeb,0x33,0x75,0x01,0x5e,0xeb,0x92,0xfb, + 0x37,0xae,0xe0,0x31,0x80,0xa9,0x06,0x42,0x2b,0xd5,0x00} + }, + { + /* No.124 delta:1016 weight:1635 */ + 11213, + 80, + 18, + 2, + {(0x00000000), + (0x65b814ab), + (0x0b7c8e4e), + (0x6ec49ae5), + (0xe3e007c1), + (0x8658136a), + (0xe89c898f), + (0x8d249d24), + (0x00008bf0), + (0x65b89f5b), + (0x0b7c05be), + (0x6ec41115), + (0xe3e08c31), + (0x8658989a), + (0xe89c027f), + (0x8d2416d4)}, + {(0x00000000), + (0x110f9000), + (0x08401000), + (0x194f8000), + (0x20040000), + (0x310b9000), + (0x28441000), + (0x394b8000), + (0x00025e00), + (0x110dce00), + (0x08424e00), + (0x194dde00), + (0x20065e00), + (0x3109ce00), + (0x28464e00), + (0x3949de00)}, + {(0x3f800000), + (0x3f8887c8), + (0x3f842008), + (0x3f8ca7c0), + (0x3f900200), + (0x3f9885c8), + (0x3f942208), + (0x3f9ca5c0), + (0x3f80012f), + (0x3f8886e7), + (0x3f842127), + (0x3f8ca6ef), + (0x3f90032f), + (0x3f9884e7), + (0x3f942327), + (0x3f9ca4ef)}, + (0xfff80000), + {0xe8,0x1c,0x9d,0xcc,0x4a,0x97,0xc0,0xae,0x9d,0xce, + 0x1a,0xa4,0x45,0x9f,0x41,0xc4,0x83,0xd9,0x62,0x72,0x00} + }, + { + /* No.125 delta:1214 weight:1175 */ + 11213, + 28, + 16, + 7, + {(0x00000000), + (0x7b11b46d), + (0x3e529f7c), + (0x45432b11), + (0x05a007d2), + (0x7eb1b3bf), + (0x3bf298ae), + (0x40e32cc3), + (0x00001cdb), + (0x7b11a8b6), + (0x3e5283a7), + (0x454337ca), + (0x05a01b09), + (0x7eb1af64), + (0x3bf28475), + (0x40e33018)}, + {(0x00000000), + (0x4460cc00), + (0x50de6c00), + (0x14bea000), + (0x43011e00), + (0x0761d200), + (0x13df7200), + (0x57bfbe00), + (0x41807e00), + (0x05e0b200), + (0x115e1200), + (0x553ede00), + (0x02816000), + (0x46e1ac00), + (0x525f0c00), + (0x163fc000)}, + {(0x3f800000), + (0x3fa23066), + (0x3fa86f36), + (0x3f8a5f50), + (0x3fa1808f), + (0x3f83b0e9), + (0x3f89efb9), + (0x3fabdfdf), + (0x3fa0c03f), + (0x3f82f059), + (0x3f88af09), + (0x3faa9f6f), + (0x3f8140b0), + (0x3fa370d6), + (0x3fa92f86), + (0x3f8b1fe0)}, + (0xfff80000), + {0x5c,0x4c,0x7f,0xad,0xc2,0x0a,0xe3,0x36,0x3a,0x3f, + 0x3e,0x28,0x97,0x26,0xce,0xab,0xdb,0x12,0x07,0x20,0x00} + }, + { + /* No.126 delta:962 weight:1245 */ + 11213, + 78, + 18, + 5, + {(0x00000000), + (0xee0a807e), + (0xe937683b), + (0x073de845), + (0x14f007e9), + (0xfafa8797), + (0xfdc76fd2), + (0x13cdefac), + (0x0000f676), + (0xee0a7608), + (0xe9379e4d), + (0x073d1e33), + (0x14f0f19f), + (0xfafa71e1), + (0xfdc799a4), + (0x13cd19da)}, + {(0x00000000), + (0xb028bc00), + (0x08652400), + (0xb84d9800), + (0x14028600), + (0xa42a3a00), + (0x1c67a200), + (0xac4f1e00), + (0x00023e00), + (0xb02a8200), + (0x08671a00), + (0xb84fa600), + (0x1400b800), + (0xa4280400), + (0x1c659c00), + (0xac4d2000)}, + {(0x3f800000), + (0x3fd8145e), + (0x3f843292), + (0x3fdc26cc), + (0x3f8a0143), + (0x3fd2151d), + (0x3f8e33d1), + (0x3fd6278f), + (0x3f80011f), + (0x3fd81541), + (0x3f84338d), + (0x3fdc27d3), + (0x3f8a005c), + (0x3fd21402), + (0x3f8e32ce), + (0x3fd62690)}, + (0xfff80000), + {0x1f,0xae,0x0b,0x7d,0x23,0x0c,0x6e,0xd6,0x26,0xa9, + 0x92,0xbc,0x8f,0x01,0x36,0x36,0x07,0xe9,0x2c,0x7c,0x00} + }, + { + /* No.127 delta:4656 weight:1185 */ + 11213, + 57, + 4, + 13, + 
{(0x00000000), + (0x7d46d3c2), + (0x107426fc), + (0x6d32f53e), + (0x832007fe), + (0xfe66d43c), + (0x93542102), + (0xee12f2c0), + (0x00009ac7), + (0x7d464905), + (0x1074bc3b), + (0x6d326ff9), + (0x83209d39), + (0xfe664efb), + (0x9354bbc5), + (0xee126807)}, + {(0x00000000), + (0x18401000), + (0x14000000), + (0x0c401000), + (0x00000000), + (0x18401000), + (0x14000000), + (0x0c401000), + (0x10001e00), + (0x08400e00), + (0x04001e00), + (0x1c400e00), + (0x10001e00), + (0x08400e00), + (0x04001e00), + (0x1c400e00)}, + {(0x3f800000), + (0x3f8c2008), + (0x3f8a0000), + (0x3f862008), + (0x3f800000), + (0x3f8c2008), + (0x3f8a0000), + (0x3f862008), + (0x3f88000f), + (0x3f842007), + (0x3f82000f), + (0x3f8e2007), + (0x3f88000f), + (0x3f842007), + (0x3f82000f), + (0x3f8e2007)}, + (0xfff80000), + {0x2b,0xf5,0xba,0x2c,0x06,0x65,0xd9,0xae,0x19,0x02, + 0xa4,0xb4,0xe6,0x92,0x03,0x77,0xb7,0xb8,0x8c,0x23,0x00} + }, + { + /* No.128 delta:2214 weight:1371 */ + 11213, + 43, + 23, + 2, + {(0x00000000), + (0xe454d1ab), + (0x80c8a6d9), + (0x649c7772), + (0xb0f00808), + (0x54a4d9a3), + (0x3038aed1), + (0xd46c7f7a), + (0x00004f07), + (0xe4549eac), + (0x80c8e9de), + (0x649c3875), + (0xb0f0470f), + (0x54a496a4), + (0x3038e1d6), + (0xd46c307d)}, + {(0x00000000), + (0xca494000), + (0x00538000), + (0xca1ac000), + (0x10bd2000), + (0xdaf46000), + (0x10eea000), + (0xdaa7e000), + (0x18905e00), + (0xd2d91e00), + (0x18c3de00), + (0xd28a9e00), + (0x082d7e00), + (0xc2643e00), + (0x087efe00), + (0xc237be00)}, + {(0x3f800000), + (0x3fe524a0), + (0x3f8029c0), + (0x3fe50d60), + (0x3f885e90), + (0x3fed7a30), + (0x3f887750), + (0x3fed53f0), + (0x3f8c482f), + (0x3fe96c8f), + (0x3f8c61ef), + (0x3fe9454f), + (0x3f8416bf), + (0x3fe1321f), + (0x3f843f7f), + (0x3fe11bdf)}, + (0xfff80000), + {0xf5,0xb9,0x08,0x2c,0x6d,0x40,0xb0,0x38,0xb5,0x70, + 0x83,0x86,0xbb,0x54,0xe7,0x5e,0x64,0xf3,0x40,0x61,0x00} + }, + { + /* No.129 delta:886 weight:915 */ + 11213, + 70, + 12, + 5, + {(0x00000000), + (0x242c5090), + (0x10199a60), + (0x3435caf0), + (0xe9d00810), + (0xcdfc5880), + (0xf9c99270), + (0xdde5c2e0), + (0x00003567), + (0x242c65f7), + (0x1019af07), + (0x3435ff97), + (0xe9d03d77), + (0xcdfc6de7), + (0xf9c9a717), + (0xdde5f787)}, + {(0x00000000), + (0x004e4000), + (0x1009c000), + (0x10478000), + (0x0e025000), + (0x0e4c1000), + (0x1e0b9000), + (0x1e45d000), + (0x10345e00), + (0x107a1e00), + (0x003d9e00), + (0x0073de00), + (0x1e360e00), + (0x1e784e00), + (0x0e3fce00), + (0x0e718e00)}, + {(0x3f800000), + (0x3f802720), + (0x3f8804e0), + (0x3f8823c0), + (0x3f870128), + (0x3f872608), + (0x3f8f05c8), + (0x3f8f22e8), + (0x3f881a2f), + (0x3f883d0f), + (0x3f801ecf), + (0x3f8039ef), + (0x3f8f1b07), + (0x3f8f3c27), + (0x3f871fe7), + (0x3f8738c7)}, + (0xfff80000), + {0xae,0x65,0xce,0x51,0xe8,0x63,0xb5,0x45,0x7a,0xb5, + 0x06,0x3b,0x06,0x88,0x6c,0x62,0x83,0x37,0xc8,0xb9,0x00} + }, + { + /* No.130 delta:2602 weight:1519 */ + 11213, + 18, + 4, + 6, + {(0x00000000), + (0x4fa484d0), + (0x546a6e84), + (0x1bceea54), + (0x4a400828), + (0x05e48cf8), + (0x1e2a66ac), + (0x518ee27c), + (0x0000bda0), + (0x4fa43970), + (0x546ad324), + (0x1bce57f4), + (0x4a40b588), + (0x05e43158), + (0x1e2adb0c), + (0x518e5fdc)}, + {(0x00000000), + (0x13081400), + (0x928e3000), + (0x81862400), + (0x010e0000), + (0x12061400), + (0x93803000), + (0x80882400), + (0x10581e00), + (0x03500a00), + (0x82d62e00), + (0x91de3a00), + (0x11561e00), + (0x025e0a00), + (0x83d82e00), + (0x90d03a00)}, + {(0x3f800000), + (0x3f89840a), + (0x3fc94718), + (0x3fc0c312), + (0x3f808700), + (0x3f89030a), + (0x3fc9c018), + 
(0x3fc04412), + (0x3f882c0f), + (0x3f81a805), + (0x3fc16b17), + (0x3fc8ef1d), + (0x3f88ab0f), + (0x3f812f05), + (0x3fc1ec17), + (0x3fc8681d)}, + (0xfff80000), + {0x13,0xcb,0x05,0x11,0x83,0xac,0xeb,0x2f,0xe5,0x15, + 0x1a,0x0e,0x87,0xa3,0x4d,0xac,0x2b,0xc0,0xb1,0xf3,0x00} + }, + { + /* No.131 delta:1006 weight:1303 */ + 11213, + 69, + 16, + 7, + {(0x00000000), + (0xeb61a85a), + (0x9e89b6f4), + (0x75e81eae), + (0x4cc00837), + (0xa7a1a06d), + (0xd249bec3), + (0x39281699), + (0x00006d8a), + (0xeb61c5d0), + (0x9e89db7e), + (0x75e87324), + (0x4cc065bd), + (0xa7a1cde7), + (0xd249d349), + (0x39287b13)}, + {(0x00000000), + (0x20bcb000), + (0x51807000), + (0x713cc000), + (0x02a2a000), + (0x221e1000), + (0x5322d000), + (0x739e6000), + (0x00067e00), + (0x20bace00), + (0x51860e00), + (0x713abe00), + (0x02a4de00), + (0x22186e00), + (0x5324ae00), + (0x73981e00)}, + {(0x3f800000), + (0x3f905e58), + (0x3fa8c038), + (0x3fb89e60), + (0x3f815150), + (0x3f910f08), + (0x3fa99168), + (0x3fb9cf30), + (0x3f80033f), + (0x3f905d67), + (0x3fa8c307), + (0x3fb89d5f), + (0x3f81526f), + (0x3f910c37), + (0x3fa99257), + (0x3fb9cc0f)}, + (0xfff80000), + {0xbc,0xf3,0x6a,0x80,0xa6,0xee,0x61,0x78,0xa8,0x3d, + 0x8c,0xee,0x29,0x1b,0x5e,0xfd,0x0b,0xf8,0x0f,0x41,0x00} + }, + { + /* No.132 delta:2899 weight:1651 */ + 11213, + 57, + 3, + 7, + {(0x00000000), + (0x7a913b6d), + (0xeed573ba), + (0x944448d7), + (0xfbd00846), + (0x8141332b), + (0x15057bfc), + (0x6f944091), + (0x00003675), + (0x7a910d18), + (0xeed545cf), + (0x94447ea2), + (0xfbd03e33), + (0x8141055e), + (0x15054d89), + (0x6f9476e4)}, + {(0x00000000), + (0x28410000), + (0x92350000), + (0xba740000), + (0x00601000), + (0x28211000), + (0x92551000), + (0xba141000), + (0x20161e00), + (0x08571e00), + (0xb2231e00), + (0x9a621e00), + (0x20760e00), + (0x08370e00), + (0xb2430e00), + (0x9a020e00)}, + {(0x3f800000), + (0x3f942080), + (0x3fc91a80), + (0x3fdd3a00), + (0x3f803008), + (0x3f941088), + (0x3fc92a88), + (0x3fdd0a08), + (0x3f900b0f), + (0x3f842b8f), + (0x3fd9118f), + (0x3fcd310f), + (0x3f903b07), + (0x3f841b87), + (0x3fd92187), + (0x3fcd0107)}, + (0xfff80000), + {0xa3,0x9f,0x64,0xc5,0x74,0x6d,0x2d,0x17,0xe7,0x4d, + 0x9a,0xaf,0xeb,0xb2,0x24,0x54,0x22,0x7a,0x93,0x8c,0x00} + }, + { + /* No.133 delta:1681 weight:1015 */ + 11213, + 83, + 12, + 13, + {(0x00000000), + (0x9193fd4c), + (0x375d8e7c), + (0xa6ce7330), + (0xbf90085d), + (0x2e03f511), + (0x88cd8621), + (0x195e7b6d), + (0x0000af07), + (0x9193524b), + (0x375d217b), + (0xa6cedc37), + (0xbf90a75a), + (0x2e035a16), + (0x88cd2926), + (0x195ed46a)}, + {(0x00000000), + (0x10740800), + (0x200dc000), + (0x3079c800), + (0x2056f800), + (0x3022f000), + (0x005b3800), + (0x102f3000), + (0x20031e00), + (0x30771600), + (0x000ede00), + (0x107ad600), + (0x0055e600), + (0x1021ee00), + (0x20582600), + (0x302c2e00)}, + {(0x3f800000), + (0x3f883a04), + (0x3f9006e0), + (0x3f983ce4), + (0x3f902b7c), + (0x3f981178), + (0x3f802d9c), + (0x3f881798), + (0x3f90018f), + (0x3f983b8b), + (0x3f80076f), + (0x3f883d6b), + (0x3f802af3), + (0x3f8810f7), + (0x3f902c13), + (0x3f981617)}, + (0xfff80000), + {0x5d,0xf8,0x9e,0x34,0xb8,0x35,0x07,0x7f,0xd2,0xe0, + 0x4f,0x6e,0x48,0xe9,0xdf,0x81,0x9d,0xf1,0x95,0x7a,0x00} + }, + { + /* No.134 delta:2863 weight:755 */ + 11213, + 84, + 6, + 17, + {(0x00000000), + (0x61047d07), + (0x957fb46d), + (0xf47bc96a), + (0x58f00860), + (0x39f47567), + (0xcd8fbc0d), + (0xac8bc10a), + (0x0000e9c0), + (0x610494c7), + (0x957f5dad), + (0xf47b20aa), + (0x58f0e1a0), + (0x39f49ca7), + (0xcd8f55cd), + (0xac8b28ca)}, + {(0x00000000), + 
(0x007c0000), + (0x13020000), + (0x137e0000), + (0x00020000), + (0x007e0000), + (0x13000000), + (0x137c0000), + (0x20401e00), + (0x203c1e00), + (0x33421e00), + (0x333e1e00), + (0x20421e00), + (0x203e1e00), + (0x33401e00), + (0x333c1e00)}, + {(0x3f800000), + (0x3f803e00), + (0x3f898100), + (0x3f89bf00), + (0x3f800100), + (0x3f803f00), + (0x3f898000), + (0x3f89be00), + (0x3f90200f), + (0x3f901e0f), + (0x3f99a10f), + (0x3f999f0f), + (0x3f90210f), + (0x3f901f0f), + (0x3f99a00f), + (0x3f999e0f)}, + (0xfff80000), + {0xcf,0x5c,0xa5,0xf4,0x7e,0x51,0x25,0xe8,0xf1,0x2b, + 0x6a,0x13,0xd4,0x31,0xca,0x76,0x97,0x3d,0x44,0x1a,0x00} + }, + { + /* No.135 delta:1967 weight:665 */ + 11213, + 19, + 12, + 19, + {(0x00000000), + (0xabbc4cf0), + (0xdf51c626), + (0x74ed8ad6), + (0x74a00878), + (0xdf1c4488), + (0xabf1ce5e), + (0x004d82ae), + (0x0000858b), + (0xabbcc97b), + (0xdf5143ad), + (0x74ed0f5d), + (0x74a08df3), + (0xdf1cc103), + (0xabf14bd5), + (0x004d0725)}, + {(0x00000000), + (0x41001800), + (0x004bc000), + (0x414bd800), + (0x8b86e200), + (0xca86fa00), + (0x8bcd2200), + (0xcacd3a00), + (0x70061e00), + (0x31060600), + (0x704dde00), + (0x314dc600), + (0xfb80fc00), + (0xba80e400), + (0xfbcb3c00), + (0xbacb2400)}, + {(0x3f800000), + (0x3fa0800c), + (0x3f8025e0), + (0x3fa0a5ec), + (0x3fc5c371), + (0x3fe5437d), + (0x3fc5e691), + (0x3fe5669d), + (0x3fb8030f), + (0x3f988303), + (0x3fb826ef), + (0x3f98a6e3), + (0x3ffdc07e), + (0x3fdd4072), + (0x3ffde59e), + (0x3fdd6592)}, + (0xfff80000), + {0xa2,0xe2,0xa5,0xe0,0x11,0x34,0xcb,0xf8,0xfc,0xa2, + 0x71,0xc6,0xbf,0xe2,0xc1,0x23,0x58,0x95,0xf4,0x86,0x00} + }, + { + /* No.136 delta:918 weight:1551 */ + 11213, + 47, + 18, + 4, + {(0x00000000), + (0x09adadc4), + (0xf017020b), + (0xf9baafcf), + (0x03300880), + (0x0a9da544), + (0xf3270a8b), + (0xfa8aa74f), + (0x0000cfe7), + (0x09ad6223), + (0xf017cdec), + (0xf9ba6028), + (0x0330c767), + (0x0a9d6aa3), + (0xf327c56c), + (0xfa8a68a8)}, + {(0x00000000), + (0x18747400), + (0x204c5200), + (0x38382600), + (0x10026c00), + (0x08761800), + (0x304e3e00), + (0x283a4a00), + (0x10011e00), + (0x08756a00), + (0x304d4c00), + (0x28393800), + (0x00037200), + (0x18770600), + (0x204f2000), + (0x383b5400)}, + {(0x3f800000), + (0x3f8c3a3a), + (0x3f902629), + (0x3f9c1c13), + (0x3f880136), + (0x3f843b0c), + (0x3f98271f), + (0x3f941d25), + (0x3f88008f), + (0x3f843ab5), + (0x3f9826a6), + (0x3f941c9c), + (0x3f8001b9), + (0x3f8c3b83), + (0x3f902790), + (0x3f9c1daa)}, + (0xfff80000), + {0xfd,0x25,0x81,0xf4,0x6f,0xd2,0x73,0x0c,0x1d,0xf8, + 0x3c,0x0b,0xd2,0x33,0xf7,0x54,0x12,0xcd,0x88,0xa9,0x00} + }, + { + /* No.137 delta:4265 weight:777 */ + 11213, + 70, + 4, + 16, + {(0x00000000), + (0xab4eac1a), + (0x41078fcd), + (0xea4923d7), + (0x9d20089e), + (0x366ea484), + (0xdc278753), + (0x77692b49), + (0x0000022a), + (0xab4eae30), + (0x41078de7), + (0xea4921fd), + (0x9d200ab4), + (0x366ea6ae), + (0xdc278579), + (0x77692963)}, + {(0x00000000), + (0x39400800), + (0x00500000), + (0x39100800), + (0x002c0000), + (0x396c0800), + (0x007c0000), + (0x393c0800), + (0x5e881e00), + (0x67c81600), + (0x5ed81e00), + (0x67981600), + (0x5ea41e00), + (0x67e41600), + (0x5ef41e00), + (0x67b41600)}, + {(0x3f800000), + (0x3f9ca004), + (0x3f802800), + (0x3f9c8804), + (0x3f801600), + (0x3f9cb604), + (0x3f803e00), + (0x3f9c9e04), + (0x3faf440f), + (0x3fb3e40b), + (0x3faf6c0f), + (0x3fb3cc0b), + (0x3faf520f), + (0x3fb3f20b), + (0x3faf7a0f), + (0x3fb3da0b)}, + (0xfff80000), + {0x67,0x39,0x69,0x46,0xb6,0xab,0xda,0x03,0x1a,0xb9, + 0x33,0x5c,0x43,0x1d,0xc4,0x88,0x65,0xf4,0x6e,0x7c,0x00} + }, + 
{ + /* No.138 delta:2916 weight:1245 */ + 11213, + 12, + 4, + 11, + {(0x00000000), + (0x8bd9bb70), + (0x6536d8cf), + (0xeeef63bf), + (0x83c008a3), + (0x0819b3d3), + (0xe6f6d06c), + (0x6d2f6b1c), + (0x00006357), + (0x8bd9d827), + (0x6536bb98), + (0xeeef00e8), + (0x83c06bf4), + (0x0819d084), + (0xe6f6b33b), + (0x6d2f084b)}, + {(0x00000000), + (0x10b20000), + (0x0c210800), + (0x1c930800), + (0x29540000), + (0x39e60000), + (0x25750800), + (0x35c70800), + (0x02001e00), + (0x12b21e00), + (0x0e211600), + (0x1e931600), + (0x2b541e00), + (0x3be61e00), + (0x27751600), + (0x37c71600)}, + {(0x3f800000), + (0x3f885900), + (0x3f861084), + (0x3f8e4984), + (0x3f94aa00), + (0x3f9cf300), + (0x3f92ba84), + (0x3f9ae384), + (0x3f81000f), + (0x3f89590f), + (0x3f87108b), + (0x3f8f498b), + (0x3f95aa0f), + (0x3f9df30f), + (0x3f93ba8b), + (0x3f9be38b)}, + (0xfff80000), + {0xf3,0xfa,0x1e,0x93,0x8b,0xfb,0x73,0xc0,0xf1,0x10, + 0x33,0xac,0x5f,0x6b,0x3c,0x07,0xa2,0x98,0x75,0xed,0x00} + }, + { + /* No.139 delta:2344 weight:1631 */ + 11213, + 19, + 8, + 7, + {(0x00000000), + (0xfee0d62f), + (0x304aad76), + (0xceaa7b59), + (0x871008b7), + (0x79f0de98), + (0xb75aa5c1), + (0x49ba73ee), + (0x00003b80), + (0xfee0edaf), + (0x304a96f6), + (0xceaa40d9), + (0x87103337), + (0x79f0e518), + (0xb75a9e41), + (0x49ba486e)}, + {(0x00000000), + (0x05204400), + (0x00640000), + (0x05444400), + (0x00510400), + (0x05714000), + (0x00350400), + (0x05154000), + (0x32861e00), + (0x37a65a00), + (0x32e21e00), + (0x37c25a00), + (0x32d71a00), + (0x37f75e00), + (0x32b31a00), + (0x37935e00)}, + {(0x3f800000), + (0x3f829022), + (0x3f803200), + (0x3f82a222), + (0x3f802882), + (0x3f82b8a0), + (0x3f801a82), + (0x3f828aa0), + (0x3f99430f), + (0x3f9bd32d), + (0x3f99710f), + (0x3f9be12d), + (0x3f996b8d), + (0x3f9bfbaf), + (0x3f99598d), + (0x3f9bc9af)}, + (0xfff80000), + {0xaf,0x4b,0x20,0xda,0xcd,0xb1,0xdf,0x23,0x89,0x9b, + 0x9d,0xaf,0x0b,0x97,0x1e,0x9f,0x61,0xe7,0xfc,0x34,0x00} + }, + { + /* No.140 delta:8901 weight:1275 */ + 11213, + 67, + 29, + 1, + {(0x00000000), + (0x10b888fe), + (0xd0c6c2fc), + (0xc07e4a02), + (0x93a008cd), + (0x83188033), + (0x4366ca31), + (0x53de42cf), + (0x00009412), + (0x10b81cec), + (0xd0c656ee), + (0xc07ede10), + (0x93a09cdf), + (0x83181421), + (0x43665e23), + (0x53ded6dd)}, + {(0x00000000), + (0xd4620000), + (0x7a000000), + (0xae620000), + (0x01000000), + (0xd5620000), + (0x7b000000), + (0xaf620000), + (0x20a01e00), + (0xf4c21e00), + (0x5aa01e00), + (0x8ec21e00), + (0x21a01e00), + (0xf5c21e00), + (0x5ba01e00), + (0x8fc21e00)}, + {(0x3f800000), + (0x3fea3100), + (0x3fbd0000), + (0x3fd73100), + (0x3f808000), + (0x3feab100), + (0x3fbd8000), + (0x3fd7b100), + (0x3f90500f), + (0x3ffa610f), + (0x3fad500f), + (0x3fc7610f), + (0x3f90d00f), + (0x3ffae10f), + (0x3fadd00f), + (0x3fc7e10f)}, + (0xfff80000), + {0x57,0x96,0xba,0x7d,0x3b,0x31,0xd7,0xac,0xb2,0xe9, + 0x75,0xcd,0xdb,0x72,0x76,0x05,0x30,0x7f,0xd3,0xa0,0x00} + }, + { + /* No.141 delta:3341 weight:741 */ + 11213, + 28, + 3, + 17, + {(0x00000000), + (0xac86815e), + (0xed762180), + (0x41f0a0de), + (0x81a008df), + (0x2d268981), + (0x6cd6295f), + (0xc050a801), + (0x00004e54), + (0xac86cf0a), + (0xed766fd4), + (0x41f0ee8a), + (0x81a0468b), + (0x2d26c7d5), + (0x6cd6670b), + (0xc050e655)}, + {(0x00000000), + (0x9a110000), + (0x0f440000), + (0x95550000), + (0x70020000), + (0xea130000), + (0x7f460000), + (0xe5570000), + (0x21561e00), + (0xbb471e00), + (0x2e121e00), + (0xb4031e00), + (0x51541e00), + (0xcb451e00), + (0x5e101e00), + (0xc4011e00)}, + {(0x3f800000), + (0x3fcd0880), + (0x3f87a200), 
+ (0x3fcaaa80), + (0x3fb80100), + (0x3ff50980), + (0x3fbfa300), + (0x3ff2ab80), + (0x3f90ab0f), + (0x3fdda38f), + (0x3f97090f), + (0x3fda018f), + (0x3fa8aa0f), + (0x3fe5a28f), + (0x3faf080f), + (0x3fe2008f)}, + (0xfff80000), + {0xcc,0x1d,0x62,0xab,0x61,0x1e,0xfc,0xe5,0x74,0x60, + 0xa2,0x5f,0xd0,0xfe,0xff,0x0e,0xfb,0x75,0x83,0x88,0x00} + }, + { + /* No.142 delta:5114 weight:1233 */ + 11213, + 35, + 30, + 3, + {(0x00000000), + (0xb9cd2997), + (0xab1ac509), + (0x12d7ec9e), + (0x5b2008ea), + (0xe2ed217d), + (0xf03acde3), + (0x49f7e474), + (0x0000ef9a), + (0xb9cdc60d), + (0xab1a2a93), + (0x12d70304), + (0x5b20e770), + (0xe2edcee7), + (0xf03a2279), + (0x49f70bee)}, + {(0x00000000), + (0xaef05000), + (0x2d801000), + (0x83704000), + (0x2390a000), + (0x8d60f000), + (0x0e10b000), + (0xa0e0e000), + (0x01001e00), + (0xaff04e00), + (0x2c800e00), + (0x82705e00), + (0x2290be00), + (0x8c60ee00), + (0x0f10ae00), + (0xa1e0fe00)}, + {(0x3f800000), + (0x3fd77828), + (0x3f96c008), + (0x3fc1b820), + (0x3f91c850), + (0x3fc6b078), + (0x3f870858), + (0x3fd07070), + (0x3f80800f), + (0x3fd7f827), + (0x3f964007), + (0x3fc1382f), + (0x3f91485f), + (0x3fc63077), + (0x3f878857), + (0x3fd0f07f)}, + (0xfff80000), + {0xce,0x51,0x4c,0x65,0x7a,0xae,0xbf,0x6f,0xd6,0x0b, + 0x0c,0x85,0x1c,0x15,0xe3,0xbf,0x9f,0x2b,0xa1,0x5d,0x00} + }, + { + /* No.143 delta:2115 weight:1465 */ + 11213, + 11, + 20, + 4, + {(0x00000000), + (0x34795048), + (0x0b9d244e), + (0x3fe47406), + (0xc90008f2), + (0xfd7958ba), + (0xc29d2cbc), + (0xf6e47cf4), + (0x00008680), + (0x3479d6c8), + (0x0b9da2ce), + (0x3fe4f286), + (0xc9008e72), + (0xfd79de3a), + (0xc29daa3c), + (0xf6e4fa74)}, + {(0x00000000), + (0xc1592000), + (0x16984000), + (0xd7c16000), + (0x21226800), + (0xe07b4800), + (0x37ba2800), + (0xf6e30800), + (0x08345e00), + (0xc96d7e00), + (0x1eac1e00), + (0xdff53e00), + (0x29163600), + (0xe84f1600), + (0x3f8e7600), + (0xfed75600)}, + {(0x3f800000), + (0x3fe0ac90), + (0x3f8b4c20), + (0x3febe0b0), + (0x3f909134), + (0x3ff03da4), + (0x3f9bdd14), + (0x3ffb7184), + (0x3f841a2f), + (0x3fe4b6bf), + (0x3f8f560f), + (0x3feffa9f), + (0x3f948b1b), + (0x3ff4278b), + (0x3f9fc73b), + (0x3fff6bab)}, + (0xfff80000), + {0x5f,0xfb,0xd7,0xc9,0x75,0x94,0x0e,0x84,0xfd,0x2b, + 0xc3,0xaf,0xdd,0x54,0xe4,0x81,0x96,0x14,0x65,0x90,0x00} + }, + { + /* No.144 delta:1074 weight:843 */ + 11213, + 50, + 20, + 11, + {(0x00000000), + (0xa8afcf6f), + (0x497c3b6e), + (0xe1d3f401), + (0xa0800909), + (0x082fc666), + (0xe9fc3267), + (0x4153fd08), + (0x0000ec80), + (0xa8af23ef), + (0x497cd7ee), + (0xe1d31881), + (0xa080e589), + (0x082f2ae6), + (0xe9fcdee7), + (0x41531188)}, + {(0x00000000), + (0x00081200), + (0xd0708400), + (0xd0789600), + (0x70302a00), + (0x70383800), + (0xa040ae00), + (0xa048bc00), + (0x1040fe00), + (0x1048ec00), + (0xc0307a00), + (0xc0386800), + (0x6070d400), + (0x6078c600), + (0xb0005000), + (0xb0084200)}, + {(0x3f800000), + (0x3f800409), + (0x3fe83842), + (0x3fe83c4b), + (0x3fb81815), + (0x3fb81c1c), + (0x3fd02057), + (0x3fd0245e), + (0x3f88207f), + (0x3f882476), + (0x3fe0183d), + (0x3fe01c34), + (0x3fb0386a), + (0x3fb03c63), + (0x3fd80028), + (0x3fd80421)}, + (0xfff80000), + {0x95,0xfe,0x49,0x06,0x5e,0xae,0xca,0x7d,0x0a,0xce, + 0xf9,0x80,0x70,0xc2,0x03,0xa9,0x1c,0xc3,0x16,0x85,0x00} + }, + { + /* No.145 delta:2918 weight:721 */ + 11213, + 86, + 4, + 18, + {(0x00000000), + (0x3bebda5d), + (0x18b70390), + (0x235cd9cd), + (0x2c60091c), + (0x178bd341), + (0x34d70a8c), + (0x0f3cd0d1), + (0x00004f75), + (0x3beb9528), + (0x18b74ce5), + (0x235c96b8), + (0x2c604669), + 
(0x178b9c34), + (0x34d745f9), + (0x0f3c9fa4)}, + {(0x00000000), + (0x1e710200), + (0x09800000), + (0x17f10200), + (0x00c00000), + (0x1eb10200), + (0x09400000), + (0x17310200), + (0x30431e00), + (0x2e321c00), + (0x39c31e00), + (0x27b21c00), + (0x30831e00), + (0x2ef21c00), + (0x39031e00), + (0x27721c00)}, + {(0x3f800000), + (0x3f8f3881), + (0x3f84c000), + (0x3f8bf881), + (0x3f806000), + (0x3f8f5881), + (0x3f84a000), + (0x3f8b9881), + (0x3f98218f), + (0x3f97190e), + (0x3f9ce18f), + (0x3f93d90e), + (0x3f98418f), + (0x3f97790e), + (0x3f9c818f), + (0x3f93b90e)}, + (0xfff80000), + {0x3c,0x75,0x1b,0x34,0x56,0x7d,0x5c,0xdd,0x5a,0x41, + 0xb9,0x82,0x6d,0x1d,0xc3,0x11,0x62,0x1e,0xea,0x73,0x00} + }, + { + /* No.146 delta:2588 weight:993 */ + 11213, + 77, + 8, + 11, + {(0x00000000), + (0x29b76eba), + (0xb5519d7b), + (0x9ce6f3c1), + (0xac000920), + (0x85b7679a), + (0x1951945b), + (0x30e6fae1), + (0x00002499), + (0x29b74a23), + (0xb551b9e2), + (0x9ce6d758), + (0xac002db9), + (0x85b74303), + (0x1951b0c2), + (0x30e6de78)}, + {(0x00000000), + (0x00350000), + (0x086a0000), + (0x085f0000), + (0x085c0000), + (0x08690000), + (0x00360000), + (0x00030000), + (0x00519e00), + (0x00649e00), + (0x083b9e00), + (0x080e9e00), + (0x080d9e00), + (0x08389e00), + (0x00679e00), + (0x00529e00)}, + {(0x3f800000), + (0x3f801a80), + (0x3f843500), + (0x3f842f80), + (0x3f842e00), + (0x3f843480), + (0x3f801b00), + (0x3f800180), + (0x3f8028cf), + (0x3f80324f), + (0x3f841dcf), + (0x3f84074f), + (0x3f8406cf), + (0x3f841c4f), + (0x3f8033cf), + (0x3f80294f)}, + (0xfff80000), + {0x78,0x89,0x90,0x85,0x2f,0x08,0xc7,0x62,0xed,0x99, + 0x04,0xfb,0xe5,0x26,0x8d,0x20,0x08,0xbc,0xe0,0x01,0x00} + }, + { + /* No.147 delta:1196 weight:1067 */ + 11213, + 65, + 15, + 11, + {(0x00000000), + (0xe3c8118c), + (0x346df077), + (0xd7a5e1fb), + (0xff500936), + (0x1c9818ba), + (0xcb3df941), + (0x28f5e8cd), + (0x000057eb), + (0xe3c84667), + (0x346da79c), + (0xd7a5b610), + (0xff505edd), + (0x1c984f51), + (0xcb3daeaa), + (0x28f5bf26)}, + {(0x00000000), + (0x00411800), + (0x0c060000), + (0x0c471800), + (0x260a4800), + (0x264b5000), + (0x2a0c4800), + (0x2a4d5000), + (0x0003de00), + (0x0042c600), + (0x0c05de00), + (0x0c44c600), + (0x26099600), + (0x26488e00), + (0x2a0f9600), + (0x2a4e8e00)}, + {(0x3f800000), + (0x3f80208c), + (0x3f860300), + (0x3f86238c), + (0x3f930524), + (0x3f9325a8), + (0x3f950624), + (0x3f9526a8), + (0x3f8001ef), + (0x3f802163), + (0x3f8602ef), + (0x3f862263), + (0x3f9304cb), + (0x3f932447), + (0x3f9507cb), + (0x3f952747)}, + (0xfff80000), + {0x1f,0x15,0xcd,0x92,0x4b,0xcd,0x46,0xa5,0x75,0xd5, + 0x6e,0xf4,0x6d,0x40,0x10,0x3c,0xa0,0xd0,0x56,0x85,0x00} + }, + { + /* No.148 delta:995 weight:1397 */ + 11213, + 48, + 20, + 6, + {(0x00000000), + (0x81afe9a5), + (0xc7e54dd3), + (0x464aa476), + (0x42100942), + (0xc3bfe0e7), + (0x85f54491), + (0x045aad34), + (0x0000d573), + (0x81af3cd6), + (0xc7e598a0), + (0x464a7105), + (0x4210dc31), + (0xc3bf3594), + (0x85f591e2), + (0x045a7847)}, + {(0x00000000), + (0x0c8d6800), + (0x00660a00), + (0x0ceb6200), + (0x0190c600), + (0x0d1dae00), + (0x01f6cc00), + (0x0d7ba400), + (0x00423e00), + (0x0ccf5600), + (0x00243400), + (0x0ca95c00), + (0x01d2f800), + (0x0d5f9000), + (0x01b4f200), + (0x0d399a00)}, + {(0x3f800000), + (0x3f8646b4), + (0x3f803305), + (0x3f8675b1), + (0x3f80c863), + (0x3f868ed7), + (0x3f80fb66), + (0x3f86bdd2), + (0x3f80211f), + (0x3f8667ab), + (0x3f80121a), + (0x3f8654ae), + (0x3f80e97c), + (0x3f86afc8), + (0x3f80da79), + (0x3f869ccd)}, + (0xfff80000), + 
{0x89,0x5d,0x4d,0xfa,0x78,0x98,0x8a,0x59,0x89,0x1d, + 0x43,0x6d,0x08,0x21,0x0f,0x15,0xef,0xea,0xed,0x4f,0x00} + }, + { + /* No.149 delta:1807 weight:1017 */ + 11213, + 25, + 17, + 13, + {(0x00000000), + (0x95fafe23), + (0x258e5d0f), + (0xb074a32c), + (0x5b000950), + (0xcefaf773), + (0x7e8e545f), + (0xeb74aa7c), + (0x00007b8f), + (0x95fa85ac), + (0x258e2680), + (0xb074d8a3), + (0x5b0072df), + (0xcefa8cfc), + (0x7e8e2fd0), + (0xeb74d1f3)}, + {(0x00000000), + (0x40690000), + (0x107c4000), + (0x50154000), + (0x0061b000), + (0x4008b000), + (0x101df000), + (0x5074f000), + (0x10221e00), + (0x504b1e00), + (0x005e5e00), + (0x40375e00), + (0x1043ae00), + (0x502aae00), + (0x003fee00), + (0x4056ee00)}, + {(0x3f800000), + (0x3fa03480), + (0x3f883e20), + (0x3fa80aa0), + (0x3f8030d8), + (0x3fa00458), + (0x3f880ef8), + (0x3fa83a78), + (0x3f88110f), + (0x3fa8258f), + (0x3f802f2f), + (0x3fa01baf), + (0x3f8821d7), + (0x3fa81557), + (0x3f801ff7), + (0x3fa02b77)}, + (0xfff80000), + {0x29,0x89,0x68,0x24,0x4a,0xc0,0x9c,0xf5,0x79,0x14, + 0xc1,0x26,0xaa,0x3d,0xbf,0x1f,0xc1,0xbc,0x78,0x97,0x00} + }, + { + /* No.150 delta:2642 weight:975 */ + 11213, + 44, + 4, + 15, + {(0x00000000), + (0x3cf7745b), + (0x8a251fd4), + (0xb6d26b8f), + (0xc580096c), + (0xf9777d37), + (0x4fa516b8), + (0x735262e3), + (0x00007ddf), + (0x3cf70984), + (0x8a25620b), + (0xb6d21650), + (0xc58074b3), + (0xf97700e8), + (0x4fa56b67), + (0x73521f3c)}, + {(0x00000000), + (0x52780000), + (0x13600000), + (0x41180000), + (0x08cc0000), + (0x5ab40000), + (0x1bac0000), + (0x49d40000), + (0x02401e00), + (0x50381e00), + (0x11201e00), + (0x43581e00), + (0x0a8c1e00), + (0x58f41e00), + (0x19ec1e00), + (0x4b941e00)}, + {(0x3f800000), + (0x3fa93c00), + (0x3f89b000), + (0x3fa08c00), + (0x3f846600), + (0x3fad5a00), + (0x3f8dd600), + (0x3fa4ea00), + (0x3f81200f), + (0x3fa81c0f), + (0x3f88900f), + (0x3fa1ac0f), + (0x3f85460f), + (0x3fac7a0f), + (0x3f8cf60f), + (0x3fa5ca0f)}, + (0xfff80000), + {0x1c,0x8b,0x81,0x2c,0x97,0xf0,0xd6,0x0f,0xeb,0x28, + 0x7e,0xbd,0x56,0xe1,0x95,0x03,0x72,0xd1,0x0a,0x96,0x00} + }, + { + /* No.151 delta:962 weight:1023 */ + 11213, + 78, + 12, + 10, + {(0x00000000), + (0x171b50e0), + (0x997dbf57), + (0x8e66efb7), + (0x22d00977), + (0x35cb5997), + (0xbbadb620), + (0xacb6e6c0), + (0x00001c87), + (0x171b4c67), + (0x997da3d0), + (0x8e66f330), + (0x22d015f0), + (0x35cb4510), + (0xbbadaaa7), + (0xacb6fa47)}, + {(0x00000000), + (0x21821c00), + (0x10102200), + (0x31923e00), + (0x60023000), + (0x41802c00), + (0x70121200), + (0x51900e00), + (0xc100de00), + (0xe082c200), + (0xd110fc00), + (0xf092e000), + (0xa102ee00), + (0x8080f200), + (0xb112cc00), + (0x9090d000)}, + {(0x3f800000), + (0x3f90c10e), + (0x3f880811), + (0x3f98c91f), + (0x3fb00118), + (0x3fa0c016), + (0x3fb80909), + (0x3fa8c807), + (0x3fe0806f), + (0x3ff04161), + (0x3fe8887e), + (0x3ff84970), + (0x3fd08177), + (0x3fc04079), + (0x3fd88966), + (0x3fc84868)}, + (0xfff80000), + {0xb1,0xe6,0xdf,0x01,0xdd,0xa2,0x07,0x31,0x50,0x37, + 0xbf,0x34,0x2a,0x6d,0xfe,0xe7,0x69,0x32,0x01,0x60,0x00} + }, + { + /* No.152 delta:1302 weight:1041 */ + 11213, + 55, + 10, + 13, + {(0x00000000), + (0xd0556a8d), + (0x5e4d31d5), + (0x8e185b58), + (0xe6600989), + (0x36356304), + (0xb82d385c), + (0x687852d1), + (0x000019bb), + (0xd0557336), + (0x5e4d286e), + (0x8e1842e3), + (0xe6601032), + (0x36357abf), + (0xb82d21e7), + (0x68784b6a)}, + {(0x00000000), + (0x627c4000), + (0x3034a000), + (0x5248e000), + (0x00038000), + (0x627fc000), + (0x30372000), + (0x524b6000), + (0x80519e00), + (0xe22dde00), + (0xb0653e00), + 
(0xd2197e00), + (0x80521e00), + (0xe22e5e00), + (0xb066be00), + (0xd21afe00)}, + {(0x3f800000), + (0x3fb13e20), + (0x3f981a50), + (0x3fa92470), + (0x3f8001c0), + (0x3fb13fe0), + (0x3f981b90), + (0x3fa925b0), + (0x3fc028cf), + (0x3ff116ef), + (0x3fd8329f), + (0x3fe90cbf), + (0x3fc0290f), + (0x3ff1172f), + (0x3fd8335f), + (0x3fe90d7f)}, + (0xfff80000), + {0x9e,0x26,0x73,0xd6,0xbe,0x9b,0x5f,0x7d,0x6b,0x71, + 0x07,0x55,0x14,0xdf,0x70,0x0c,0x4a,0x9a,0xdd,0x23,0x00} + }, + { + /* No.153 delta:1084 weight:1123 */ + 11213, + 86, + 22, + 10, + {(0x00000000), + (0xa2a5f5b9), + (0x2b421269), + (0x89e7e7d0), + (0x54300991), + (0xf695fc28), + (0x7f721bf8), + (0xddd7ee41), + (0x0000dc1c), + (0xa2a529a5), + (0x2b42ce75), + (0x89e73bcc), + (0x5430d58d), + (0xf6952034), + (0x7f72c7e4), + (0xddd7325d)}, + {(0x00000000), + (0x40cc0000), + (0x50668000), + (0x10aa8000), + (0x0069f000), + (0x40a5f000), + (0x500f7000), + (0x10c37000), + (0x00131e00), + (0x40df1e00), + (0x50759e00), + (0x10b99e00), + (0x007aee00), + (0x40b6ee00), + (0x501c6e00), + (0x10d06e00)}, + {(0x3f800000), + (0x3fa06600), + (0x3fa83340), + (0x3f885540), + (0x3f8034f8), + (0x3fa052f8), + (0x3fa807b8), + (0x3f8861b8), + (0x3f80098f), + (0x3fa06f8f), + (0x3fa83acf), + (0x3f885ccf), + (0x3f803d77), + (0x3fa05b77), + (0x3fa80e37), + (0x3f886837)}, + (0xfff80000), + {0x27,0x0a,0x3f,0xdb,0x36,0x65,0x8b,0x9c,0x6c,0x3d, + 0xb3,0x81,0x6b,0x82,0xdc,0xaf,0x77,0xa0,0x5a,0x41,0x00} + }, + { + /* No.154 delta:1218 weight:1125 */ + 11213, + 33, + 17, + 9, + {(0x00000000), + (0xbc510269), + (0x187fd997), + (0xa42edbfe), + (0xb2d009a5), + (0x0e810bcc), + (0xaaafd032), + (0x16fed25b), + (0x00008c92), + (0xbc518efb), + (0x187f5505), + (0xa42e576c), + (0xb2d08537), + (0x0e81875e), + (0xaaaf5ca0), + (0x16fe5ec9)}, + {(0x00000000), + (0x4674aa00), + (0x20885800), + (0x66fcf200), + (0x010e8000), + (0x477a2a00), + (0x2186d800), + (0x67f27200), + (0x02c2be00), + (0x44b61400), + (0x224ae600), + (0x643e4c00), + (0x03cc3e00), + (0x45b89400), + (0x23446600), + (0x6530cc00)}, + {(0x3f800000), + (0x3fa33a55), + (0x3f90442c), + (0x3fb37e79), + (0x3f808740), + (0x3fa3bd15), + (0x3f90c36c), + (0x3fb3f939), + (0x3f81615f), + (0x3fa25b0a), + (0x3f912573), + (0x3fb21f26), + (0x3f81e61f), + (0x3fa2dc4a), + (0x3f91a233), + (0x3fb29866)}, + (0xfff80000), + {0xcd,0xbe,0xfa,0xd2,0x8e,0x1c,0x9f,0x67,0x49,0xe4, + 0xe4,0x6c,0xb2,0xe3,0x01,0xb2,0x56,0x77,0x18,0x40,0x00} + }, + { + /* No.155 delta:2783 weight:955 */ + 11213, + 59, + 3, + 7, + {(0x00000000), + (0xac2382ba), + (0x6ff1673d), + (0xc3d2e587), + (0x37a009b6), + (0x9b838b0c), + (0x58516e8b), + (0xf472ec31), + (0x0000a5f6), + (0xac23274c), + (0x6ff1c2cb), + (0xc3d24071), + (0x37a0ac40), + (0x9b832efa), + (0x5851cb7d), + (0xf47249c7)}, + {(0x00000000), + (0x14228400), + (0x00948000), + (0x14b60400), + (0x206a8400), + (0x34480000), + (0x20fe0400), + (0x34dc8000), + (0x40481e00), + (0x546a9a00), + (0x40dc9e00), + (0x54fe1a00), + (0x60229a00), + (0x74001e00), + (0x60b61a00), + (0x74949e00)}, + {(0x3f800000), + (0x3f8a1142), + (0x3f804a40), + (0x3f8a5b02), + (0x3f903542), + (0x3f9a2400), + (0x3f907f02), + (0x3f9a6e40), + (0x3fa0240f), + (0x3faa354d), + (0x3fa06e4f), + (0x3faa7f0d), + (0x3fb0114d), + (0x3fba000f), + (0x3fb05b0d), + (0x3fba4a4f)}, + (0xfff80000), + {0x12,0x6b,0xe6,0x1a,0xef,0x03,0x6a,0x5a,0xeb,0x44, + 0xd4,0xe8,0x31,0x17,0x3d,0x9b,0xd7,0xd8,0xc8,0xd2,0x00} + }, + { + /* No.156 delta:2681 weight:1401 */ + 11213, + 11, + 22, + 2, + {(0x00000000), + (0xfcaff0c3), + (0xee10c3a9), + (0x12bf336a), + (0x1ea009c1), + 
(0xe20ff902), + (0xf0b0ca68), + (0x0c1f3aab), + (0x0000a3d9), + (0xfcaf531a), + (0xee106070), + (0x12bf90b3), + (0x1ea0aa18), + (0xe20f5adb), + (0xf0b069b1), + (0x0c1f9972)}, + {(0x00000000), + (0x8f318000), + (0x0c949000), + (0x83a51000), + (0x61378000), + (0xee060000), + (0x6da31000), + (0xe2929000), + (0x4044de00), + (0xcf755e00), + (0x4cd04e00), + (0xc3e1ce00), + (0x21735e00), + (0xae42de00), + (0x2de7ce00), + (0xa2d64e00)}, + {(0x3f800000), + (0x3fc798c0), + (0x3f864a48), + (0x3fc1d288), + (0x3fb09bc0), + (0x3ff70300), + (0x3fb6d188), + (0x3ff14948), + (0x3fa0226f), + (0x3fe7baaf), + (0x3fa66827), + (0x3fe1f0e7), + (0x3f90b9af), + (0x3fd7216f), + (0x3f96f3e7), + (0x3fd16b27)}, + (0xfff80000), + {0x83,0x73,0xde,0x94,0x03,0x6f,0x08,0x85,0xc8,0x3c, + 0xf2,0x9a,0xb2,0x9d,0x8f,0xfe,0x34,0x78,0xe6,0x25,0x00} + }, + { + /* No.157 delta:4633 weight:903 */ + 11213, + 70, + 1, + 8, + {(0x00000000), + (0x4732d8c0), + (0x2145def8), + (0x66770638), + (0x8ec009da), + (0xc9f2d11a), + (0xaf85d722), + (0xe8b70fe2), + (0x000031b0), + (0x4732e970), + (0x2145ef48), + (0x66773788), + (0x8ec0386a), + (0xc9f2e0aa), + (0xaf85e692), + (0xe8b73e52)}, + {(0x00000000), + (0x10400000), + (0x24640000), + (0x34240000), + (0x81100000), + (0x91500000), + (0xa5740000), + (0xb5340000), + (0x08431e00), + (0x18031e00), + (0x2c271e00), + (0x3c671e00), + (0x89531e00), + (0x99131e00), + (0xad371e00), + (0xbd771e00)}, + {(0x3f800000), + (0x3f882000), + (0x3f923200), + (0x3f9a1200), + (0x3fc08800), + (0x3fc8a800), + (0x3fd2ba00), + (0x3fda9a00), + (0x3f84218f), + (0x3f8c018f), + (0x3f96138f), + (0x3f9e338f), + (0x3fc4a98f), + (0x3fcc898f), + (0x3fd69b8f), + (0x3fdebb8f)}, + (0xfff80000), + {0xdd,0x87,0x76,0x32,0x84,0x88,0x7a,0x5f,0x52,0x09, + 0x1f,0x36,0xae,0xfa,0x7b,0x5d,0xb4,0x87,0x91,0x59,0x00} + }, + { + /* No.158 delta:1376 weight:811 */ + 11213, + 58, + 16, + 14, + {(0x00000000), + (0xdc5d3a0a), + (0x023cad01), + (0xde61970b), + (0x286009e2), + (0xf43d33e8), + (0x2a5ca4e3), + (0xf6019ee9), + (0x00009c42), + (0xdc5da648), + (0x023c3143), + (0xde610b49), + (0x286095a0), + (0xf43dafaa), + (0x2a5c38a1), + (0xf60102ab)}, + {(0x00000000), + (0x50081000), + (0x305e0000), + (0x60561000), + (0x10324000), + (0x403a5000), + (0x206c4000), + (0x70645000), + (0x20011e00), + (0x70090e00), + (0x105f1e00), + (0x40570e00), + (0x30335e00), + (0x603b4e00), + (0x006d5e00), + (0x50654e00)}, + {(0x3f800000), + (0x3fa80408), + (0x3f982f00), + (0x3fb02b08), + (0x3f881920), + (0x3fa01d28), + (0x3f903620), + (0x3fb83228), + (0x3f90008f), + (0x3fb80487), + (0x3f882f8f), + (0x3fa02b87), + (0x3f9819af), + (0x3fb01da7), + (0x3f8036af), + (0x3fa832a7)}, + (0xfff80000), + {0x96,0xe0,0xc6,0xef,0x7f,0x51,0x3c,0x13,0xd3,0x63, + 0x80,0x54,0xc8,0x09,0xcc,0x33,0x8e,0x9a,0x15,0x91,0x00} + }, + { + /* No.159 delta:1647 weight:1099 */ + 11213, + 91, + 19, + 1, + {(0x00000000), + (0xeffea95b), + (0x587a7fdf), + (0xb784d684), + (0xe5b009ff), + (0x0a4ea0a4), + (0xbdca7620), + (0x5234df7b), + (0x00008677), + (0xeffe2f2c), + (0x587af9a8), + (0xb78450f3), + (0xe5b08f88), + (0x0a4e26d3), + (0xbdcaf057), + (0x5234590c)}, + {(0x00000000), + (0xc3463c00), + (0x20814400), + (0xe3c77800), + (0x04036400), + (0xc7455800), + (0x24822000), + (0xe7c41c00), + (0x66201e00), + (0xa5662200), + (0x46a15a00), + (0x85e76600), + (0x62237a00), + (0xa1654600), + (0x42a23e00), + (0x81e40200)}, + {(0x3f800000), + (0x3fe1a31e), + (0x3f9040a2), + (0x3ff1e3bc), + (0x3f8201b2), + (0x3fe3a2ac), + (0x3f924110), + (0x3ff3e20e), + (0x3fb3100f), + (0x3fd2b311), + (0x3fa350ad), + (0x3fc2f3b3), + 
(0x3fb111bd), + (0x3fd0b2a3), + (0x3fa1511f), + (0x3fc0f201)}, + (0xfff80000), + {0x6f,0x70,0x62,0xec,0x14,0x82,0xd2,0xb9,0xc0,0x8d, + 0xdc,0xae,0x07,0x77,0xb8,0x94,0x3f,0xc9,0xfa,0xe1,0x00} + }, + { + /* No.160 delta:2967 weight:901 */ + 11213, + 53, + 18, + 1, + {(0x00000000), + (0xaa302b40), + (0x85ec337a), + (0x2fdc183a), + (0x3dd00a0a), + (0x97e0214a), + (0xb83c3970), + (0x120c1230), + (0x00008c9b), + (0xaa30a7db), + (0x85ecbfe1), + (0x2fdc94a1), + (0x3dd08691), + (0x97e0add1), + (0xb83cb5eb), + (0x120c9eab)}, + {(0x00000000), + (0x0e4c0c00), + (0x81a28000), + (0x8fee8c00), + (0x01009400), + (0x0f4c9800), + (0x80a21400), + (0x8eee1800), + (0x09031e00), + (0x074f1200), + (0x88a19e00), + (0x86ed9200), + (0x08038a00), + (0x064f8600), + (0x89a10a00), + (0x87ed0600)}, + {(0x3f800000), + (0x3f872606), + (0x3fc0d140), + (0x3fc7f746), + (0x3f80804a), + (0x3f87a64c), + (0x3fc0510a), + (0x3fc7770c), + (0x3f84818f), + (0x3f83a789), + (0x3fc450cf), + (0x3fc376c9), + (0x3f8401c5), + (0x3f8327c3), + (0x3fc4d085), + (0x3fc3f683)}, + (0xfff80000), + {0xa5,0x57,0xd6,0x41,0xd4,0x56,0x49,0x1c,0xe3,0x86, + 0x35,0xfc,0x5c,0xaf,0xd9,0xba,0x3e,0xfe,0x47,0xfa,0x00} + }, + { + /* No.161 delta:1285 weight:1009 */ + 11213, + 44, + 22, + 9, + {(0x00000000), + (0xbd887100), + (0x6d55a627), + (0xd0ddd727), + (0x18000a1d), + (0xa5887b1d), + (0x7555ac3a), + (0xc8dddd3a), + (0x0000acfa), + (0xbd88ddfa), + (0x6d550add), + (0xd0dd7bdd), + (0x1800a6e7), + (0xa588d7e7), + (0x755500c0), + (0xc8dd71c0)}, + {(0x00000000), + (0x0fe36000), + (0x0045f000), + (0x0fa69000), + (0x121a2000), + (0x1df94000), + (0x125fd000), + (0x1dbcb000), + (0x00371e00), + (0x0fd47e00), + (0x0072ee00), + (0x0f918e00), + (0x122d3e00), + (0x1dce5e00), + (0x1268ce00), + (0x1d8bae00)}, + {(0x3f800000), + (0x3f87f1b0), + (0x3f8022f8), + (0x3f87d348), + (0x3f890d10), + (0x3f8efca0), + (0x3f892fe8), + (0x3f8ede58), + (0x3f801b8f), + (0x3f87ea3f), + (0x3f803977), + (0x3f87c8c7), + (0x3f89169f), + (0x3f8ee72f), + (0x3f893467), + (0x3f8ec5d7)}, + (0xfff80000), + {0xb3,0x0a,0x52,0x2b,0xc3,0xf3,0x8a,0x0b,0x60,0xcf, + 0x50,0x81,0xc0,0x03,0x13,0x44,0x56,0xfd,0xe5,0x6c,0x00} + }, + { + /* No.162 delta:3653 weight:1321 */ + 11213, + 77, + 4, + 1, + {(0x00000000), + (0x09eb7b50), + (0x4a578b30), + (0x43bcf060), + (0xc4b00a20), + (0xcd5b7170), + (0x8ee78110), + (0x870cfa40), + (0x000030d1), + (0x09eb4b81), + (0x4a57bbe1), + (0x43bcc0b1), + (0xc4b03af1), + (0xcd5b41a1), + (0x8ee7b1c1), + (0x870cca91)}, + {(0x00000000), + (0x20630000), + (0x00492000), + (0x202a2000), + (0x400a0000), + (0x60690000), + (0x40432000), + (0x60202000), + (0xc0027e00), + (0xe0617e00), + (0xc04b5e00), + (0xe0285e00), + (0x80087e00), + (0xa06b7e00), + (0x80415e00), + (0xa0225e00)}, + {(0x3f800000), + (0x3f903180), + (0x3f802490), + (0x3f901510), + (0x3fa00500), + (0x3fb03480), + (0x3fa02190), + (0x3fb01010), + (0x3fe0013f), + (0x3ff030bf), + (0x3fe025af), + (0x3ff0142f), + (0x3fc0043f), + (0x3fd035bf), + (0x3fc020af), + (0x3fd0112f)}, + (0xfff80000), + {0xed,0x0f,0x9e,0x5d,0x4b,0x7d,0x2e,0x4f,0xf8,0xa4, + 0xba,0x23,0xdc,0x1a,0x0e,0x40,0xa9,0xff,0xbf,0xfa,0x00} + }, + { + /* No.163 delta:1515 weight:829 */ + 11213, + 59, + 16, + 13, + {(0x00000000), + (0xd192fecc), + (0x1c86d46e), + (0xcd142aa2), + (0xeb900a31), + (0x3a02f4fd), + (0xf716de5f), + (0x26842093), + (0x00009c7b), + (0xd19262b7), + (0x1c864815), + (0xcd14b6d9), + (0xeb90964a), + (0x3a026886), + (0xf7164224), + (0x2684bce8)}, + {(0x00000000), + (0xc9688000), + (0x08400200), + (0xc1288200), + (0x082d4800), + (0xc145c800), + 
(0x006d4a00), + (0xc905ca00), + (0x312c1e00), + (0xf8449e00), + (0x396c1c00), + (0xf0049c00), + (0x39015600), + (0xf069d600), + (0x31415400), + (0xf829d400)}, + {(0x3f800000), + (0x3fe4b440), + (0x3f842001), + (0x3fe09441), + (0x3f8416a4), + (0x3fe0a2e4), + (0x3f8036a5), + (0x3fe482e5), + (0x3f98960f), + (0x3ffc224f), + (0x3f9cb60e), + (0x3ff8024e), + (0x3f9c80ab), + (0x3ff834eb), + (0x3f98a0aa), + (0x3ffc14ea)}, + (0xfff80000), + {0xe7,0xaa,0x57,0x85,0x45,0x30,0x30,0xea,0x81,0x55, + 0x14,0xf3,0xe1,0x42,0x5a,0xfc,0x05,0x1e,0xd3,0xf0,0x00} + }, + { + /* No.164 delta:5942 weight:1403 */ + 11213, + 49, + 28, + 3, + {(0x00000000), + (0x96bbff16), + (0x0e2dd758), + (0x9896284e), + (0xb5700a46), + (0x23cbf550), + (0xbb5ddd1e), + (0x2de62208), + (0x0000f8d9), + (0x96bb07cf), + (0x0e2d2f81), + (0x9896d097), + (0xb570f29f), + (0x23cb0d89), + (0xbb5d25c7), + (0x2de6dad1)}, + {(0x00000000), + (0x44940000), + (0x03b80000), + (0x472c0000), + (0x48600000), + (0x0cf40000), + (0x4bd80000), + (0x0f4c0000), + (0x00801e00), + (0x44141e00), + (0x03381e00), + (0x47ac1e00), + (0x48e01e00), + (0x0c741e00), + (0x4b581e00), + (0x0fcc1e00)}, + {(0x3f800000), + (0x3fa24a00), + (0x3f81dc00), + (0x3fa39600), + (0x3fa43000), + (0x3f867a00), + (0x3fa5ec00), + (0x3f87a600), + (0x3f80400f), + (0x3fa20a0f), + (0x3f819c0f), + (0x3fa3d60f), + (0x3fa4700f), + (0x3f863a0f), + (0x3fa5ac0f), + (0x3f87e60f)}, + (0xfff80000), + {0x76,0xb3,0x89,0x96,0x29,0x62,0x6a,0x7b,0xe7,0x6f, + 0x4a,0x6c,0x1f,0xbf,0x96,0x66,0x8d,0x45,0x5d,0x5c,0x00} + }, + { + /* No.165 delta:1776 weight:1045 */ + 11213, + 74, + 10, + 13, + {(0x00000000), + (0x24c994e7), + (0x7e990c2c), + (0x5a5098cb), + (0xb9000a51), + (0x9dc99eb6), + (0xc799067d), + (0xe350929a), + (0x0000dbdf), + (0x24c94f38), + (0x7e99d7f3), + (0x5a504314), + (0xb900d18e), + (0x9dc94569), + (0xc799dda2), + (0xe3504945)}, + {(0x00000000), + (0x00764000), + (0x000d4000), + (0x007b0000), + (0x102e6000), + (0x10582000), + (0x10232000), + (0x10556000), + (0x10065e00), + (0x10701e00), + (0x100b1e00), + (0x107d5e00), + (0x00283e00), + (0x005e7e00), + (0x00257e00), + (0x00533e00)}, + {(0x3f800000), + (0x3f803b20), + (0x3f8006a0), + (0x3f803d80), + (0x3f881730), + (0x3f882c10), + (0x3f881190), + (0x3f882ab0), + (0x3f88032f), + (0x3f88380f), + (0x3f88058f), + (0x3f883eaf), + (0x3f80141f), + (0x3f802f3f), + (0x3f8012bf), + (0x3f80299f)}, + (0xfff80000), + {0xfb,0xa8,0xac,0x3b,0xfe,0x8d,0xcc,0xf9,0x64,0x79, + 0xa4,0xdb,0x6d,0x8f,0x8a,0x6e,0xb9,0x91,0x4e,0xb0,0x00} + }, + { + /* No.166 delta:1278 weight:1311 */ + 11213, + 38, + 18, + 10, + {(0x00000000), + (0x1aa51933), + (0x7654762b), + (0x6cf16f18), + (0x06100a69), + (0x1cb5135a), + (0x70447c42), + (0x6ae16571), + (0x0000bdcd), + (0x1aa5a4fe), + (0x7654cbe6), + (0x6cf1d2d5), + (0x0610b7a4), + (0x1cb5ae97), + (0x7044c18f), + (0x6ae1d8bc)}, + {(0x00000000), + (0x1c020c00), + (0x10065600), + (0x0c045a00), + (0xa4030000), + (0xb8010c00), + (0xb4055600), + (0xa8075a00), + (0x51205e00), + (0x4d225200), + (0x41260800), + (0x5d240400), + (0xf5235e00), + (0xe9215200), + (0xe5250800), + (0xf9270400)}, + {(0x3f800000), + (0x3f8e0106), + (0x3f88032b), + (0x3f86022d), + (0x3fd20180), + (0x3fdc0086), + (0x3fda02ab), + (0x3fd403ad), + (0x3fa8902f), + (0x3fa69129), + (0x3fa09304), + (0x3fae9202), + (0x3ffa91af), + (0x3ff490a9), + (0x3ff29284), + (0x3ffc9382)}, + (0xfff80000), + {0xe2,0x4b,0xe1,0x7f,0x94,0x30,0x00,0x2c,0x65,0x3e, + 0x59,0x45,0xfa,0x87,0x44,0x25,0xe1,0x2d,0xb7,0xdb,0x00} + }, + { + /* No.167 delta:1582 weight:743 */ + 11213, + 80, + 9, + 17, + 
{(0x00000000), + (0xf8f257fd), + (0x26c7fb3f), + (0xde35acc2), + (0xba900a7d), + (0x42625d80), + (0x9c57f142), + (0x64a5a6bf), + (0x00008157), + (0xf8f2d6aa), + (0x26c77a68), + (0xde352d95), + (0xba908b2a), + (0x4262dcd7), + (0x9c577015), + (0x64a527e8)}, + {(0x00000000), + (0x884f0000), + (0x11035000), + (0x994c5000), + (0x2a02a800), + (0xa24da800), + (0x3b01f800), + (0xb34ef800), + (0x10041e00), + (0x984b1e00), + (0x01074e00), + (0x89484e00), + (0x3a06b600), + (0xb249b600), + (0x2b05e600), + (0xa34ae600)}, + {(0x3f800000), + (0x3fc42780), + (0x3f8881a8), + (0x3fcca628), + (0x3f950154), + (0x3fd126d4), + (0x3f9d80fc), + (0x3fd9a77c), + (0x3f88020f), + (0x3fcc258f), + (0x3f8083a7), + (0x3fc4a427), + (0x3f9d035b), + (0x3fd924db), + (0x3f9582f3), + (0x3fd1a573)}, + (0xfff80000), + {0xad,0x7a,0xb9,0x26,0x28,0x43,0xfc,0x1a,0x71,0x9d, + 0xfe,0xb1,0x4c,0x75,0x45,0xcd,0x8d,0xf2,0x4d,0x01,0x00} + }, + { + /* No.168 delta:7619 weight:787 */ + 11213, + 59, + 26, + 1, + {(0x00000000), + (0x98e8b6d9), + (0x521f6a74), + (0xcaf7dcad), + (0x53d00a8d), + (0xcb38bc54), + (0x01cf60f9), + (0x9927d620), + (0x0000f72a), + (0x98e841f3), + (0x521f9d5e), + (0xcaf72b87), + (0x53d0fda7), + (0xcb384b7e), + (0x01cf97d3), + (0x9927210a)}, + {(0x00000000), + (0x4e200000), + (0x70542000), + (0x3e742000), + (0x13942000), + (0x5db42000), + (0x63c00000), + (0x2de00000), + (0x08001e00), + (0x46201e00), + (0x78543e00), + (0x36743e00), + (0x1b943e00), + (0x55b43e00), + (0x6bc01e00), + (0x25e01e00)}, + {(0x3f800000), + (0x3fa71000), + (0x3fb82a10), + (0x3f9f3a10), + (0x3f89ca10), + (0x3faeda10), + (0x3fb1e000), + (0x3f96f000), + (0x3f84000f), + (0x3fa3100f), + (0x3fbc2a1f), + (0x3f9b3a1f), + (0x3f8dca1f), + (0x3faada1f), + (0x3fb5e00f), + (0x3f92f00f)}, + (0xfff80000), + {0xc1,0xeb,0x15,0xa4,0x17,0x5a,0x94,0xa3,0xdd,0x5b, + 0x7e,0x69,0x12,0x88,0xd9,0x94,0x36,0x81,0x7c,0x98,0x00} + }, + { + /* No.169 delta:1710 weight:1097 */ + 11213, + 57, + 14, + 11, + {(0x00000000), + (0x5e421ddc), + (0x638787c4), + (0x3dc59a18), + (0x8e000a9d), + (0xd0421741), + (0xed878d59), + (0xb3c59085), + (0x0000840f), + (0x5e4299d3), + (0x638703cb), + (0x3dc51e17), + (0x8e008e92), + (0xd042934e), + (0xed870956), + (0xb3c5148a)}, + {(0x00000000), + (0x4a752000), + (0xd0784000), + (0x9a0d6000), + (0x004c4000), + (0x4a396000), + (0xd0340000), + (0x9a412000), + (0x00005e00), + (0x4a757e00), + (0xd0781e00), + (0x9a0d3e00), + (0x004c1e00), + (0x4a393e00), + (0xd0345e00), + (0x9a417e00)}, + {(0x3f800000), + (0x3fa53a90), + (0x3fe83c20), + (0x3fcd06b0), + (0x3f802620), + (0x3fa51cb0), + (0x3fe81a00), + (0x3fcd2090), + (0x3f80002f), + (0x3fa53abf), + (0x3fe83c0f), + (0x3fcd069f), + (0x3f80260f), + (0x3fa51c9f), + (0x3fe81a2f), + (0x3fcd20bf)}, + (0xfff80000), + {0x63,0xf7,0x0a,0xee,0x1d,0x41,0x31,0xa9,0xfd,0x48, + 0x35,0xb8,0x86,0xd8,0xf7,0x25,0x29,0x14,0xb0,0x12,0x00} + }, + { + /* No.170 delta:4372 weight:995 */ + 11213, + 28, + 7, + 15, + {(0x00000000), + (0x655fb5a5), + (0x87997101), + (0xe2c6c4a4), + (0x59f00aaa), + (0x3cafbf0f), + (0xde697bab), + (0xbb36ce0e), + (0x0000365d), + (0x655f83f8), + (0x8799475c), + (0xe2c6f2f9), + (0x59f03cf7), + (0x3caf8952), + (0xde694df6), + (0xbb36f853)}, + {(0x00000000), + (0x81898800), + (0xd202e000), + (0x538b6800), + (0x08d64000), + (0x895fc800), + (0xdad4a000), + (0x5b5d2800), + (0x004a5e00), + (0x81c3d600), + (0xd248be00), + (0x53c13600), + (0x089c1e00), + (0x89159600), + (0xda9efe00), + (0x5b177600)}, + {(0x3f800000), + (0x3fc0c4c4), + (0x3fe90170), + (0x3fa9c5b4), + (0x3f846b20), + (0x3fc4afe4), + (0x3fed6a50), + 
(0x3fadae94), + (0x3f80252f), + (0x3fc0e1eb), + (0x3fe9245f), + (0x3fa9e09b), + (0x3f844e0f), + (0x3fc48acb), + (0x3fed4f7f), + (0x3fad8bbb)}, + (0xfff80000), + {0x89,0xf5,0x02,0x8f,0x88,0xdf,0x79,0x2c,0xc5,0x67, + 0x1f,0x9e,0xd5,0x1b,0xd9,0x5a,0x79,0x7f,0xca,0x71,0x00} + }, + { + /* No.171 delta:3023 weight:1549 */ + 11213, + 32, + 3, + 5, + {(0x00000000), + (0x3b134609), + (0x5b97bf87), + (0x6084f98e), + (0xdbb00aba), + (0xe0a34cb3), + (0x8027b53d), + (0xbb34f334), + (0x00000ea9), + (0x3b1348a0), + (0x5b97b12e), + (0x6084f727), + (0xdbb00413), + (0xe0a3421a), + (0x8027bb94), + (0xbb34fd9d)}, + {(0x00000000), + (0x04030000), + (0x00c10000), + (0x04c20000), + (0x304a8000), + (0x34498000), + (0x308b8000), + (0x34888000), + (0x00679e00), + (0x04649e00), + (0x00a69e00), + (0x04a59e00), + (0x302d1e00), + (0x342e1e00), + (0x30ec1e00), + (0x34ef1e00)}, + {(0x3f800000), + (0x3f820180), + (0x3f806080), + (0x3f826100), + (0x3f982540), + (0x3f9a24c0), + (0x3f9845c0), + (0x3f9a4440), + (0x3f8033cf), + (0x3f82324f), + (0x3f80534f), + (0x3f8252cf), + (0x3f98168f), + (0x3f9a170f), + (0x3f98760f), + (0x3f9a778f)}, + (0xfff80000), + {0x6a,0x8d,0xed,0xda,0x5f,0x16,0x85,0xea,0xd9,0x4c, + 0x66,0x31,0xe2,0x9c,0xc6,0x66,0x6d,0x10,0xa0,0xf9,0x00} + }, + { + /* No.172 delta:892 weight:1461 */ + 11213, + 41, + 18, + 4, + {(0x00000000), + (0x51c43b00), + (0x7d71cf7d), + (0x2cb5f47d), + (0x03f00ac3), + (0x523431c3), + (0x7e81c5be), + (0x2f45febe), + (0x0000fa3d), + (0x51c4c13d), + (0x7d713540), + (0x2cb50e40), + (0x03f0f0fe), + (0x5234cbfe), + (0x7e813f83), + (0x2f450483)}, + {(0x00000000), + (0x40672000), + (0x00508c00), + (0x4037ac00), + (0x00402000), + (0x40270000), + (0x0010ac00), + (0x40778c00), + (0x0e881e00), + (0x4eef3e00), + (0x0ed89200), + (0x4ebfb200), + (0x0ec83e00), + (0x4eaf1e00), + (0x0e98b200), + (0x4eff9200)}, + {(0x3f800000), + (0x3fa03390), + (0x3f802846), + (0x3fa01bd6), + (0x3f802010), + (0x3fa01380), + (0x3f800856), + (0x3fa03bc6), + (0x3f87440f), + (0x3fa7779f), + (0x3f876c49), + (0x3fa75fd9), + (0x3f87641f), + (0x3fa7578f), + (0x3f874c59), + (0x3fa77fc9)}, + (0xfff80000), + {0xc5,0x69,0xaf,0xb6,0xea,0x60,0x78,0x16,0x94,0x3a, + 0x36,0x01,0x5a,0xe7,0x99,0xcd,0x09,0x6a,0xda,0xea,0x00} + }, + { + /* No.173 delta:1013 weight:1115 */ + 11213, + 91, + 19, + 8, + {(0x00000000), + (0xd6aede60), + (0xdfd2815a), + (0x097c5f3a), + (0xbc400ad0), + (0x6aeed4b0), + (0x63928b8a), + (0xb53c55ea), + (0x000003b3), + (0xd6aeddd3), + (0xdfd282e9), + (0x097c5c89), + (0xbc400963), + (0x6aeed703), + (0x63928839), + (0xb53c5659)}, + {(0x00000000), + (0x24590000), + (0x08c14000), + (0x2c984000), + (0x80083800), + (0xa4513800), + (0x88c97800), + (0xac907800), + (0x50c09e00), + (0x74999e00), + (0x5801de00), + (0x7c58de00), + (0xd0c8a600), + (0xf491a600), + (0xd809e600), + (0xfc50e600)}, + {(0x3f800000), + (0x3f922c80), + (0x3f8460a0), + (0x3f964c20), + (0x3fc0041c), + (0x3fd2289c), + (0x3fc464bc), + (0x3fd6483c), + (0x3fa8604f), + (0x3fba4ccf), + (0x3fac00ef), + (0x3fbe2c6f), + (0x3fe86453), + (0x3ffa48d3), + (0x3fec04f3), + (0x3ffe2873)}, + (0xfff80000), + {0x55,0x6c,0x5c,0x20,0xdd,0xbb,0xa3,0xad,0x56,0x67, + 0x8f,0xcf,0xc9,0xb4,0xc5,0x0f,0x66,0xf7,0xb5,0xcc,0x00} + }, + { + /* No.174 delta:2582 weight:887 */ + 11213, + 40, + 4, + 1, + {(0x00000000), + (0x70c0d1b0), + (0x83972550), + (0xf357f4e0), + (0xbbc00ae0), + (0xcb00db50), + (0x38572fb0), + (0x4897fe00), + (0x0000df80), + (0x70c00e30), + (0x8397fad0), + (0xf3572b60), + (0xbbc0d560), + (0xcb0004d0), + (0x3857f030), + (0x48972180)}, + {(0x00000000), + 
(0x00b20200), + (0x00430000), + (0x00f10200), + (0x40024000), + (0x40b04200), + (0x40414000), + (0x40f34200), + (0x6401de00), + (0x64b3dc00), + (0x6442de00), + (0x64f0dc00), + (0x24039e00), + (0x24b19c00), + (0x24409e00), + (0x24f29c00)}, + {(0x3f800000), + (0x3f805901), + (0x3f802180), + (0x3f807881), + (0x3fa00120), + (0x3fa05821), + (0x3fa020a0), + (0x3fa079a1), + (0x3fb200ef), + (0x3fb259ee), + (0x3fb2216f), + (0x3fb2786e), + (0x3f9201cf), + (0x3f9258ce), + (0x3f92204f), + (0x3f92794e)}, + (0xfff80000), + {0x4c,0x34,0x96,0xf1,0x70,0xe5,0x52,0xb1,0x4d,0x38, + 0xa6,0x34,0x26,0x09,0x5c,0x6c,0x92,0xe6,0x3e,0xcb,0x00} + }, + { + /* No.175 delta:1962 weight:1531 */ + 11213, + 11, + 19, + 4, + {(0x00000000), + (0x91291aaa), + (0xdc02bffd), + (0x4d2ba557), + (0x1fc00af5), + (0x8ee9105f), + (0xc3c2b508), + (0x52ebafa2), + (0x0000add4), + (0x9129b77e), + (0xdc021229), + (0x4d2b0883), + (0x1fc0a721), + (0x8ee9bd8b), + (0xc3c218dc), + (0x52eb0276)}, + {(0x00000000), + (0x09ff0c00), + (0x09141600), + (0x00eb1a00), + (0x7510e000), + (0x7cefec00), + (0x7c04f600), + (0x75fbfa00), + (0x05509e00), + (0x0caf9200), + (0x0c448800), + (0x05bb8400), + (0x70407e00), + (0x79bf7200), + (0x79546800), + (0x70ab6400)}, + {(0x3f800000), + (0x3f84ff86), + (0x3f848a0b), + (0x3f80758d), + (0x3fba8870), + (0x3fbe77f6), + (0x3fbe027b), + (0x3fbafdfd), + (0x3f82a84f), + (0x3f8657c9), + (0x3f862244), + (0x3f82ddc2), + (0x3fb8203f), + (0x3fbcdfb9), + (0x3fbcaa34), + (0x3fb855b2)}, + (0xfff80000), + {0x6f,0x40,0xbd,0xae,0x48,0xc1,0x7d,0x5b,0x99,0x1a, + 0x4e,0x57,0x61,0x39,0x1e,0x68,0xb1,0xf7,0xc0,0xaa,0x00} + }, + { + /* No.176 delta:2277 weight:543 */ + 11213, + 33, + 7, + 19, + {(0x00000000), + (0x8044010b), + (0x62414b11), + (0xe2054a1a), + (0xbb700b06), + (0x3b340a0d), + (0xd9314017), + (0x5975411c), + (0x000099a5), + (0x804498ae), + (0x6241d2b4), + (0xe205d3bf), + (0xbb7092a3), + (0x3b3493a8), + (0xd931d9b2), + (0x5975d8b9)}, + {(0x00000000), + (0x0c3e0800), + (0x80be4000), + (0x8c804800), + (0x00483000), + (0x0c763800), + (0x80f67000), + (0x8cc87800), + (0x00791e00), + (0x0c471600), + (0x80c75e00), + (0x8cf95600), + (0x00312e00), + (0x0c0f2600), + (0x808f6e00), + (0x8cb16600)}, + {(0x3f800000), + (0x3f861f04), + (0x3fc05f20), + (0x3fc64024), + (0x3f802418), + (0x3f863b1c), + (0x3fc07b38), + (0x3fc6643c), + (0x3f803c8f), + (0x3f86238b), + (0x3fc063af), + (0x3fc67cab), + (0x3f801897), + (0x3f860793), + (0x3fc047b7), + (0x3fc658b3)}, + (0xfff80000), + {0xfc,0x12,0x23,0x4e,0x03,0xa3,0xf6,0xfd,0x28,0xb5, + 0xf1,0x0e,0x0a,0x5d,0xed,0x4a,0xd6,0x0c,0xfa,0x9a,0x00} + }, + { + /* No.177 delta:1331 weight:1179 */ + 11213, + 60, + 21, + 4, + {(0x00000000), + (0x8e46be68), + (0x9cb3a5d7), + (0x12f51bbf), + (0x3e600b12), + (0xb026b57a), + (0xa2d3aec5), + (0x2c9510ad), + (0x0000942c), + (0x8e462a44), + (0x9cb331fb), + (0x12f58f93), + (0x3e609f3e), + (0xb0262156), + (0xa2d33ae9), + (0x2c958481)}, + {(0x00000000), + (0x0940a800), + (0x388c9000), + (0x31cc3800), + (0x00608000), + (0x09202800), + (0x38ec1000), + (0x31acb800), + (0x005c7e00), + (0x091cd600), + (0x38d0ee00), + (0x31904600), + (0x003cfe00), + (0x097c5600), + (0x38b06e00), + (0x31f0c600)}, + {(0x3f800000), + (0x3f84a054), + (0x3f9c4648), + (0x3f98e61c), + (0x3f803040), + (0x3f849014), + (0x3f9c7608), + (0x3f98d65c), + (0x3f802e3f), + (0x3f848e6b), + (0x3f9c6877), + (0x3f98c823), + (0x3f801e7f), + (0x3f84be2b), + (0x3f9c5837), + (0x3f98f863)}, + (0xfff80000), + {0x71,0x3a,0xb0,0x71,0xe1,0x95,0x3c,0xba,0x23,0x00, + 0x7a,0xc6,0x93,0x4a,0xb7,0xb8,0xeb,0x00,0x24,0x8e,0x00} + }, 
+ { + /* No.178 delta:3213 weight:657 */ + 11213, + 46, + 22, + 1, + {(0x00000000), + (0x53fdf1b0), + (0x32502578), + (0x61add4c8), + (0x46c00b28), + (0x153dfa98), + (0x74902e50), + (0x276ddfe0), + (0x00006d70), + (0x53fd9cc0), + (0x32504808), + (0x61adb9b8), + (0x46c06658), + (0x153d97e8), + (0x74904320), + (0x276db290)}, + {(0x00000000), + (0x44633a00), + (0x44038600), + (0x0060bc00), + (0x1819c200), + (0x5c7af800), + (0x5c1a4400), + (0x18797e00), + (0x21eade00), + (0x6589e400), + (0x65e95800), + (0x218a6200), + (0x39f31c00), + (0x7d902600), + (0x7df09a00), + (0x3993a000)}, + {(0x3f800000), + (0x3fa2319d), + (0x3fa201c3), + (0x3f80305e), + (0x3f8c0ce1), + (0x3fae3d7c), + (0x3fae0d22), + (0x3f8c3cbf), + (0x3f90f56f), + (0x3fb2c4f2), + (0x3fb2f4ac), + (0x3f90c531), + (0x3f9cf98e), + (0x3fbec813), + (0x3fbef84d), + (0x3f9cc9d0)}, + (0xfff80000), + {0x92,0xb2,0x66,0x1e,0x08,0xb8,0x0b,0x02,0x41,0xdc, + 0x45,0x04,0x59,0x65,0x0b,0x55,0x82,0x05,0xeb,0x6d,0x00} + }, + { + /* No.179 delta:4972 weight:1077 */ + 11213, + 16, + 7, + 11, + {(0x00000000), + (0xcf8962a9), + (0xb6d797a1), + (0x795ef508), + (0xfbb00b36), + (0x3439699f), + (0x4d679c97), + (0x82eefe3e), + (0x0000d59e), + (0xcf89b737), + (0xb6d7423f), + (0x795e2096), + (0xfbb0dea8), + (0x3439bc01), + (0x4d674909), + (0x82ee2ba0)}, + {(0x00000000), + (0x01000000), + (0x08000000), + (0x09000000), + (0x41800000), + (0x40800000), + (0x49800000), + (0x48800000), + (0x06801e00), + (0x07801e00), + (0x0e801e00), + (0x0f801e00), + (0x47001e00), + (0x46001e00), + (0x4f001e00), + (0x4e001e00)}, + {(0x3f800000), + (0x3f808000), + (0x3f840000), + (0x3f848000), + (0x3fa0c000), + (0x3fa04000), + (0x3fa4c000), + (0x3fa44000), + (0x3f83400f), + (0x3f83c00f), + (0x3f87400f), + (0x3f87c00f), + (0x3fa3800f), + (0x3fa3000f), + (0x3fa7800f), + (0x3fa7000f)}, + (0xfff80000), + {0x55,0xe1,0xc9,0x26,0x74,0x2c,0xed,0x5c,0x7b,0x89, + 0xe7,0x1d,0x6f,0x5d,0x02,0xe5,0x4a,0x19,0x8c,0x04,0x00} + }, + { + /* No.180 delta:2285 weight:1529 */ + 11213, + 16, + 5, + 9, + {(0x00000000), + (0x5766ef3b), + (0xaca2499c), + (0xfbc4a6a7), + (0x63800b4f), + (0x34e6e474), + (0xcf2242d3), + (0x9844ade8), + (0x0000a19a), + (0x57664ea1), + (0xaca2e806), + (0xfbc4073d), + (0x6380aad5), + (0x34e645ee), + (0xcf22e349), + (0x98440c72)}, + {(0x00000000), + (0x1901c200), + (0x88118000), + (0x91104200), + (0x02ef8000), + (0x1bee4200), + (0x8afe0000), + (0x93ffc200), + (0x3087be00), + (0x29867c00), + (0xb8963e00), + (0xa197fc00), + (0x32683e00), + (0x2b69fc00), + (0xba79be00), + (0xa3787c00)}, + {(0x3f800000), + (0x3f8c80e1), + (0x3fc408c0), + (0x3fc88821), + (0x3f8177c0), + (0x3f8df721), + (0x3fc57f00), + (0x3fc9ffe1), + (0x3f9843df), + (0x3f94c33e), + (0x3fdc4b1f), + (0x3fd0cbfe), + (0x3f99341f), + (0x3f95b4fe), + (0x3fdd3cdf), + (0x3fd1bc3e)}, + (0xfff80000), + {0x4b,0xb0,0xb5,0xae,0x6b,0x5e,0xea,0x44,0x3b,0xd2, + 0xcf,0xf3,0x8a,0x8d,0xff,0x98,0xeb,0xce,0xa2,0x22,0x00} + }, + { + /* No.181 delta:1320 weight:1461 */ + 11213, + 52, + 21, + 2, + {(0x00000000), + (0x2f3716e3), + (0x0af6f7a8), + (0x25c1e14b), + (0x67900b5a), + (0x48a71db9), + (0x6d66fcf2), + (0x4251ea11), + (0x0000adb5), + (0x2f37bb56), + (0x0af65a1d), + (0x25c14cfe), + (0x6790a6ef), + (0x48a7b00c), + (0x6d665147), + (0x425147a4)}, + {(0x00000000), + (0xc0295e00), + (0x1040c000), + (0xd0699e00), + (0x20fc7200), + (0xe0d52c00), + (0x30bcb200), + (0xf095ec00), + (0x10a2fe00), + (0xd08ba000), + (0x00e23e00), + (0xc0cb6000), + (0x305e8c00), + (0xf077d200), + (0x201e4c00), + (0xe0371200)}, + {(0x3f800000), + (0x3fe014af), + 
(0x3f882060), + (0x3fe834cf), + (0x3f907e39), + (0x3ff06a96), + (0x3f985e59), + (0x3ff84af6), + (0x3f88517f), + (0x3fe845d0), + (0x3f80711f), + (0x3fe065b0), + (0x3f982f46), + (0x3ff83be9), + (0x3f900f26), + (0x3ff01b89)}, + (0xfff80000), + {0xf7,0x30,0xe4,0xa1,0xb6,0x2c,0xff,0x05,0x23,0x13, + 0xd1,0x56,0xd7,0x7a,0x22,0x24,0xae,0xf7,0x34,0x52,0x00} + }, + { + /* No.182 delta:789 weight:1509 */ + 11213, + 74, + 16, + 6, + {(0x00000000), + (0xe416060b), + (0xd6bfa624), + (0x32a9a02f), + (0x56500b61), + (0xb2460d6a), + (0x80efad45), + (0x64f9ab4e), + (0x0000ce80), + (0xe416c88b), + (0xd6bf68a4), + (0x32a96eaf), + (0x5650c5e1), + (0xb246c3ea), + (0x80ef63c5), + (0x64f965ce)}, + {(0x00000000), + (0x804c0800), + (0x107ad000), + (0x9036d800), + (0x0055c400), + (0x8019cc00), + (0x102f1400), + (0x90631c00), + (0x00033e00), + (0x804f3600), + (0x1079ee00), + (0x9035e600), + (0x0056fa00), + (0x801af200), + (0x102c2a00), + (0x90602200)}, + {(0x3f800000), + (0x3fc02604), + (0x3f883d68), + (0x3fc81b6c), + (0x3f802ae2), + (0x3fc00ce6), + (0x3f88178a), + (0x3fc8318e), + (0x3f80019f), + (0x3fc0279b), + (0x3f883cf7), + (0x3fc81af3), + (0x3f802b7d), + (0x3fc00d79), + (0x3f881615), + (0x3fc83011)}, + (0xfff80000), + {0xa2,0xea,0x48,0x1e,0xb3,0x8f,0x99,0x75,0xba,0x12, + 0x9c,0xbf,0xf3,0xa0,0xb8,0x5d,0xc8,0x38,0xc4,0x74,0x00} + }, + { + /* No.183 delta:1268 weight:1109 */ + 11213, + 42, + 13, + 11, + {(0x00000000), + (0xf7ba01fd), + (0xaa8773ac), + (0x5d3d7251), + (0xa5b00b7d), + (0x520a0a80), + (0x0f3778d1), + (0xf88d792c), + (0x0000aaa0), + (0xf7baab5d), + (0xaa87d90c), + (0x5d3dd8f1), + (0xa5b0a1dd), + (0x520aa020), + (0x0f37d271), + (0xf88dd38c)}, + {(0x00000000), + (0x92abc000), + (0x00443800), + (0x92eff800), + (0x00c12000), + (0x926ae000), + (0x00851800), + (0x922ed800), + (0x02a3be00), + (0x90087e00), + (0x02e78600), + (0x904c4600), + (0x02629e00), + (0x90c95e00), + (0x0226a600), + (0x908d6600)}, + {(0x3f800000), + (0x3fc955e0), + (0x3f80221c), + (0x3fc977fc), + (0x3f806090), + (0x3fc93570), + (0x3f80428c), + (0x3fc9176c), + (0x3f8151df), + (0x3fc8043f), + (0x3f8173c3), + (0x3fc82623), + (0x3f81314f), + (0x3fc864af), + (0x3f811353), + (0x3fc846b3)}, + (0xfff80000), + {0xb0,0x87,0xce,0x22,0xcb,0xa4,0x10,0xdc,0xdb,0x3e, + 0x3c,0x67,0xae,0x3f,0xc0,0x11,0xf3,0xec,0xe2,0x06,0x00} + }, + { + /* No.184 delta:1092 weight:1295 */ + 11213, + 91, + 17, + 8, + {(0x00000000), + (0x2fa2608e), + (0x8f680761), + (0xa0ca67ef), + (0x13a00b8a), + (0x3c026b04), + (0x9cc80ceb), + (0xb36a6c65), + (0x00007f70), + (0x2fa21ffe), + (0x8f687811), + (0xa0ca189f), + (0x13a074fa), + (0x3c021474), + (0x9cc8739b), + (0xb36a1315)}, + {(0x00000000), + (0x089d6000), + (0x1a00e200), + (0x129d8200), + (0x40df4000), + (0x48422000), + (0x5adfa200), + (0x5242c200), + (0x01a6de00), + (0x093bbe00), + (0x1ba63c00), + (0x133b5c00), + (0x41799e00), + (0x49e4fe00), + (0x5b797c00), + (0x53e41c00)}, + {(0x3f800000), + (0x3f844eb0), + (0x3f8d0071), + (0x3f894ec1), + (0x3fa06fa0), + (0x3fa42110), + (0x3fad6fd1), + (0x3fa92161), + (0x3f80d36f), + (0x3f849ddf), + (0x3f8dd31e), + (0x3f899dae), + (0x3fa0bccf), + (0x3fa4f27f), + (0x3fadbcbe), + (0x3fa9f20e)}, + (0xfff80000), + {0xbd,0xb1,0x02,0x3f,0x46,0x65,0xe7,0x62,0xbf,0x9e, + 0x12,0xaa,0x33,0xbe,0x0c,0x5d,0x89,0x02,0x2c,0x11,0x00} + }, + { + /* No.185 delta:8146 weight:1071 */ + 11213, + 66, + 28, + 1, + {(0x00000000), + (0xc3e3a60c), + (0x80c000cd), + (0x4323a6c1), + (0xda800b97), + (0x1963ad9b), + (0x5a400b5a), + (0x99a3ad56), + (0x000058ad), + (0xc3e3fea1), + (0x80c05860), + (0x4323fe6c), + 
(0xda80533a), + (0x1963f536), + (0x5a4053f7), + (0x99a3f5fb)}, + {(0x00000000), + (0x89d10a00), + (0x403c0200), + (0xc9ed0800), + (0x53520000), + (0xda830a00), + (0x136e0200), + (0x9abf0800), + (0x61609e00), + (0xe8b19400), + (0x215c9c00), + (0xa88d9600), + (0x32329e00), + (0xbbe39400), + (0x720e9c00), + (0xfbdf9600)}, + {(0x3f800000), + (0x3fc4e885), + (0x3fa01e01), + (0x3fe4f684), + (0x3fa9a900), + (0x3fed4185), + (0x3f89b701), + (0x3fcd5f84), + (0x3fb0b04f), + (0x3ff458ca), + (0x3f90ae4e), + (0x3fd446cb), + (0x3f99194f), + (0x3fddf1ca), + (0x3fb9074e), + (0x3ffdefcb)}, + (0xfff80000), + {0xf7,0x3c,0x9c,0x40,0x28,0xb5,0xbe,0xbf,0x2e,0x97, + 0xa1,0x61,0x3b,0x82,0x2a,0x82,0x3c,0xa0,0xf4,0x90,0x00} + }, + { + /* No.186 delta:2697 weight:1957 */ + 11213, + 43, + 3, + 3, + {(0x00000000), + (0x7a0a48fc), + (0x29a53edd), + (0x53af7621), + (0x08f00ba0), + (0x72fa435c), + (0x2155357d), + (0x5b5f7d81), + (0x0000132f), + (0x7a0a5bd3), + (0x29a52df2), + (0x53af650e), + (0x08f0188f), + (0x72fa5073), + (0x21552652), + (0x5b5f6eae)}, + {(0x00000000), + (0xa2021000), + (0x1c21a000), + (0xbe23b000), + (0x00408000), + (0xa2429000), + (0x1c612000), + (0xbe633000), + (0x40201e00), + (0xe2220e00), + (0x5c01be00), + (0xfe03ae00), + (0x40609e00), + (0xe2628e00), + (0x5c413e00), + (0xfe432e00)}, + {(0x3f800000), + (0x3fd10108), + (0x3f8e10d0), + (0x3fdf11d8), + (0x3f802040), + (0x3fd12148), + (0x3f8e3090), + (0x3fdf3198), + (0x3fa0100f), + (0x3ff11107), + (0x3fae00df), + (0x3fff01d7), + (0x3fa0304f), + (0x3ff13147), + (0x3fae209f), + (0x3fff2197)}, + (0xfff80000), + {0x1a,0x79,0x8f,0xb2,0xec,0xc2,0xee,0xfc,0x66,0xa6, + 0x37,0x12,0x4d,0x78,0x6e,0xa1,0xb0,0x60,0x86,0x37,0x00} + }, + { + /* No.187 delta:2420 weight:1355 */ + 11213, + 28, + 24, + 3, + {(0x00000000), + (0x9580742c), + (0xce45a063), + (0x5bc5d44f), + (0x33f00bbf), + (0xa6707f93), + (0xfdb5abdc), + (0x6835dff0), + (0x000009ba), + (0x95807d96), + (0xce45a9d9), + (0x5bc5ddf5), + (0x33f00205), + (0xa6707629), + (0xfdb5a266), + (0x6835d64a)}, + {(0x00000000), + (0x40bf0200), + (0x00440200), + (0x40fb0000), + (0x3411d000), + (0x74aed200), + (0x3455d200), + (0x74ead000), + (0x9082de00), + (0xd03ddc00), + (0x90c6dc00), + (0xd079de00), + (0xa4930e00), + (0xe42c0c00), + (0xa4d70c00), + (0xe4680e00)}, + {(0x3f800000), + (0x3fa05f81), + (0x3f802201), + (0x3fa07d80), + (0x3f9a08e8), + (0x3fba5769), + (0x3f9a2ae9), + (0x3fba7568), + (0x3fc8416f), + (0x3fe81eee), + (0x3fc8636e), + (0x3fe83cef), + (0x3fd24987), + (0x3ff21606), + (0x3fd26b86), + (0x3ff23407)}, + (0xfff80000), + {0x20,0xe4,0x7d,0x6c,0xc4,0xf9,0x2b,0x4a,0x56,0x18, + 0x64,0xa0,0x86,0x2c,0x45,0x4a,0x42,0x3a,0x22,0x49,0x00} + }, + { + /* No.188 delta:1841 weight:735 */ + 11213, + 62, + 10, + 18, + {(0x00000000), + (0x179a1ef2), + (0x535f148e), + (0x44c50a7c), + (0x33700bc3), + (0x24ea1531), + (0x602f1f4d), + (0x77b501bf), + (0x0000df3e), + (0x179ac1cc), + (0x535fcbb0), + (0x44c5d542), + (0x3370d4fd), + (0x24eaca0f), + (0x602fc073), + (0x77b5de81)}, + {(0x00000000), + (0x30b90000), + (0x083c0000), + (0x38850000), + (0x40070000), + (0x70be0000), + (0x483b0000), + (0x78820000), + (0x10651e00), + (0x20dc1e00), + (0x18591e00), + (0x28e01e00), + (0x50621e00), + (0x60db1e00), + (0x585e1e00), + (0x68e71e00)}, + {(0x3f800000), + (0x3f985c80), + (0x3f841e00), + (0x3f9c4280), + (0x3fa00380), + (0x3fb85f00), + (0x3fa41d80), + (0x3fbc4100), + (0x3f88328f), + (0x3f906e0f), + (0x3f8c2c8f), + (0x3f94700f), + (0x3fa8310f), + (0x3fb06d8f), + (0x3fac2f0f), + (0x3fb4738f)}, + (0xfff80000), + 
{0x09,0x19,0x78,0xe9,0x81,0xea,0xdf,0x95,0x46,0x6b, + 0x4f,0xa1,0xca,0x2b,0xbc,0x8a,0x0b,0xaa,0x44,0xb8,0x00} + }, + { + /* No.189 delta:3318 weight:1469 */ + 11213, + 81, + 3, + 7, + {(0x00000000), + (0xfe728280), + (0xcc048331), + (0x327601b1), + (0x42200bd8), + (0xbc528958), + (0x8e2488e9), + (0x70560a69), + (0x0000d1a0), + (0xfe725320), + (0xcc045291), + (0x3276d011), + (0x4220da78), + (0xbc5258f8), + (0x8e245949), + (0x7056dbc9)}, + {(0x00000000), + (0x12010000), + (0x53002000), + (0x41012000), + (0x20414000), + (0x32404000), + (0x73416000), + (0x61406000), + (0x20321e00), + (0x32331e00), + (0x73323e00), + (0x61333e00), + (0x00735e00), + (0x12725e00), + (0x53737e00), + (0x41727e00)}, + {(0x3f800000), + (0x3f890080), + (0x3fa98010), + (0x3fa08090), + (0x3f9020a0), + (0x3f992020), + (0x3fb9a0b0), + (0x3fb0a030), + (0x3f90190f), + (0x3f99198f), + (0x3fb9991f), + (0x3fb0999f), + (0x3f8039af), + (0x3f89392f), + (0x3fa9b9bf), + (0x3fa0b93f)}, + (0xfff80000), + {0x0f,0xe8,0x93,0xf4,0x0e,0x7e,0xaf,0xe0,0x5d,0x20, + 0x58,0x92,0x06,0x7d,0x5b,0x05,0x6b,0x8a,0x33,0xbb,0x00} + }, + { + /* No.190 delta:1194 weight:821 */ + 11213, + 72, + 11, + 16, + {(0x00000000), + (0x44a385c2), + (0xca99870f), + (0x8e3a02cd), + (0x06c00be8), + (0x42638e2a), + (0xcc598ce7), + (0x88fa0925), + (0x00007bbf), + (0x44a3fe7d), + (0xca99fcb0), + (0x8e3a7972), + (0x06c07057), + (0x4263f595), + (0xcc59f758), + (0x88fa729a)}, + {(0x00000000), + (0x00527800), + (0x302f0400), + (0x307d7c00), + (0x20017000), + (0x20530800), + (0x102e7400), + (0x107c0c00), + (0x10429e00), + (0x1010e600), + (0x206d9a00), + (0x203fe200), + (0x3043ee00), + (0x30119600), + (0x006cea00), + (0x003e9200)}, + {(0x3f800000), + (0x3f80293c), + (0x3f981782), + (0x3f983ebe), + (0x3f9000b8), + (0x3f902984), + (0x3f88173a), + (0x3f883e06), + (0x3f88214f), + (0x3f880873), + (0x3f9036cd), + (0x3f901ff1), + (0x3f9821f7), + (0x3f9808cb), + (0x3f803675), + (0x3f801f49)}, + (0xfff80000), + {0xba,0xb7,0xd5,0xc8,0xec,0x6d,0xc1,0x0c,0x19,0x38, + 0x0f,0x27,0x93,0x3b,0xd8,0x74,0xa1,0x4e,0x45,0x89,0x00} + }, + { + /* No.191 delta:1166 weight:1045 */ + 11213, + 40, + 11, + 9, + {(0x00000000), + (0x212f4af4), + (0xf0432caf), + (0xd16c665b), + (0x11d00bf5), + (0x30ff4101), + (0xe193275a), + (0xc0bc6dae), + (0x0000755f), + (0x212f3fab), + (0xf04359f0), + (0xd16c1304), + (0x11d07eaa), + (0x30ff345e), + (0xe1935205), + (0xc0bc18f1)}, + {(0x00000000), + (0x08528000), + (0x70084000), + (0x785ac000), + (0x00196000), + (0x084be000), + (0x70112000), + (0x7843a000), + (0x0024fe00), + (0x08767e00), + (0x702cbe00), + (0x787e3e00), + (0x003d9e00), + (0x086f1e00), + (0x7035de00), + (0x78675e00)}, + {(0x3f800000), + (0x3f842940), + (0x3fb80420), + (0x3fbc2d60), + (0x3f800cb0), + (0x3f8425f0), + (0x3fb80890), + (0x3fbc21d0), + (0x3f80127f), + (0x3f843b3f), + (0x3fb8165f), + (0x3fbc3f1f), + (0x3f801ecf), + (0x3f84378f), + (0x3fb81aef), + (0x3fbc33af)}, + (0xfff80000), + {0xbf,0xbb,0x3f,0x17,0x6e,0x1a,0x3b,0x45,0x9b,0x2a, + 0x22,0x6a,0xf9,0xf5,0xba,0x97,0x3b,0x8a,0xec,0xd6,0x00} + }, + { + /* No.192 delta:4742 weight:1471 */ + 11213, + 27, + 4, + 6, + {(0x00000000), + (0x7fe173b6), + (0x51169f27), + (0x2ef7ec91), + (0x0a300c00), + (0x75d17fb6), + (0x5b269327), + (0x24c7e091), + (0x00001d00), + (0x7fe16eb6), + (0x51168227), + (0x2ef7f191), + (0x0a301100), + (0x75d162b6), + (0x5b268e27), + (0x24c7fd91)}, + {(0x00000000), + (0x41420000), + (0x00010000), + (0x41430000), + (0x29040000), + (0x68460000), + (0x29050000), + (0x68470000), + (0x00009e00), + (0x41429e00), + (0x00019e00), + 
(0x41439e00), + (0x29049e00), + (0x68469e00), + (0x29059e00), + (0x68479e00)}, + {(0x3f800000), + (0x3fa0a100), + (0x3f800080), + (0x3fa0a180), + (0x3f948200), + (0x3fb42300), + (0x3f948280), + (0x3fb42380), + (0x3f80004f), + (0x3fa0a14f), + (0x3f8000cf), + (0x3fa0a1cf), + (0x3f94824f), + (0x3fb4234f), + (0x3f9482cf), + (0x3fb423cf)}, + (0xfff80000), + {0xb4,0x7f,0x09,0x23,0xa8,0x3f,0x5e,0x9d,0x3e,0x97, + 0x19,0x94,0x35,0x0d,0xdb,0x3e,0xb7,0x43,0x15,0xd0,0x00} + }, + { + /* No.193 delta:3962 weight:1171 */ + 11213, + 92, + 2, + 13, + {(0x00000000), + (0x76b1ced9), + (0x928504d8), + (0xe434ca01), + (0x7b300c1a), + (0x0d81c2c3), + (0xe9b508c2), + (0x9f04c61b), + (0x0000b2d9), + (0x76b17c00), + (0x9285b601), + (0xe43478d8), + (0x7b30bec3), + (0x0d81701a), + (0xe9b5ba1b), + (0x9f0474c2)}, + {(0x00000000), + (0x08400000), + (0x06400000), + (0x0e000000), + (0x01200000), + (0x09600000), + (0x07600000), + (0x0f200000), + (0x04c01e00), + (0x0c801e00), + (0x02801e00), + (0x0ac01e00), + (0x05e01e00), + (0x0da01e00), + (0x03a01e00), + (0x0be01e00)}, + {(0x3f800000), + (0x3f842000), + (0x3f832000), + (0x3f870000), + (0x3f809000), + (0x3f84b000), + (0x3f83b000), + (0x3f879000), + (0x3f82600f), + (0x3f86400f), + (0x3f81400f), + (0x3f85600f), + (0x3f82f00f), + (0x3f86d00f), + (0x3f81d00f), + (0x3f85f00f)}, + (0xfff80000), + {0xdf,0x8e,0xab,0x00,0xdb,0xdc,0xe0,0x7a,0xb0,0x4c, + 0x05,0x23,0x25,0x58,0x15,0xac,0x74,0x97,0x22,0x21,0x00} + }, + { + /* No.194 delta:2215 weight:1499 */ + 11213, + 63, + 26, + 4, + {(0x00000000), + (0xad5d413b), + (0x166ef61d), + (0xbb33b726), + (0x9e000c20), + (0x335d4d1b), + (0x886efa3d), + (0x2533bb06), + (0x000077be), + (0xad5d3685), + (0x166e81a3), + (0xbb33c098), + (0x9e007b9e), + (0x335d3aa5), + (0x886e8d83), + (0x2533ccb8)}, + {(0x00000000), + (0x10720800), + (0x60810000), + (0x70f30800), + (0x300c0000), + (0x207e0800), + (0x508d0000), + (0x40ff0800), + (0x20023e00), + (0x30703600), + (0x40833e00), + (0x50f13600), + (0x100e3e00), + (0x007c3600), + (0x708f3e00), + (0x60fd3600)}, + {(0x3f800000), + (0x3f883904), + (0x3fb04080), + (0x3fb87984), + (0x3f980600), + (0x3f903f04), + (0x3fa84680), + (0x3fa07f84), + (0x3f90011f), + (0x3f98381b), + (0x3fa0419f), + (0x3fa8789b), + (0x3f88071f), + (0x3f803e1b), + (0x3fb8479f), + (0x3fb07e9b)}, + (0xfff80000), + {0x31,0xb8,0x64,0x9a,0xae,0xad,0xc8,0xf8,0x53,0x9b, + 0x64,0x2d,0x8c,0xb0,0xd2,0xa7,0x8e,0x2e,0xb2,0xc8,0x00} + }, + { + /* No.195 delta:1215 weight:463 */ + 11213, + 60, + 13, + 19, + {(0x00000000), + (0x60520e47), + (0xae8c1b90), + (0xcede15d7), + (0xc1c00c35), + (0xa1920272), + (0x6f4c17a5), + (0x0f1e19e2), + (0x0000fd60), + (0x6052f327), + (0xae8ce6f0), + (0xcedee8b7), + (0xc1c0f155), + (0xa192ff12), + (0x6f4ceac5), + (0x0f1ee482)}, + {(0x00000000), + (0x02e00000), + (0x10cd1200), + (0x122d1200), + (0x14101800), + (0x16f01800), + (0x04dd0a00), + (0x063d0a00), + (0x0057be00), + (0x02b7be00), + (0x109aac00), + (0x127aac00), + (0x1447a600), + (0x16a7a600), + (0x048ab400), + (0x066ab400)}, + {(0x3f800000), + (0x3f817000), + (0x3f886689), + (0x3f891689), + (0x3f8a080c), + (0x3f8b780c), + (0x3f826e85), + (0x3f831e85), + (0x3f802bdf), + (0x3f815bdf), + (0x3f884d56), + (0x3f893d56), + (0x3f8a23d3), + (0x3f8b53d3), + (0x3f82455a), + (0x3f83355a)}, + (0xfff80000), + {0x67,0xca,0xf4,0x9a,0x13,0x78,0x64,0xbc,0x60,0x6d, + 0x5b,0x5f,0x48,0x61,0x94,0x6f,0xdf,0x81,0x2c,0xe1,0x00} + }, + { + /* No.196 delta:939 weight:857 */ + 11213, + 51, + 11, + 2, + {(0x00000000), + (0xa61e8990), + (0x67d50999), + (0xc1cb8009), + (0xa3f00c49), + 
(0x05ee85d9), + (0xc42505d0), + (0x623b8c40), + (0x00000381), + (0xa61e8a11), + (0x67d50a18), + (0xc1cb8388), + (0xa3f00fc8), + (0x05ee8658), + (0xc4250651), + (0x623b8fc1)}, + {(0x00000000), + (0x08105000), + (0x0002b000), + (0x0812e000), + (0x90084c00), + (0x98181c00), + (0x900afc00), + (0x981aac00), + (0x01007e00), + (0x09102e00), + (0x0102ce00), + (0x09129e00), + (0x91083200), + (0x99186200), + (0x910a8200), + (0x991ad200)}, + {(0x3f800000), + (0x3f840828), + (0x3f800158), + (0x3f840970), + (0x3fc80426), + (0x3fcc0c0e), + (0x3fc8057e), + (0x3fcc0d56), + (0x3f80803f), + (0x3f848817), + (0x3f808167), + (0x3f84894f), + (0x3fc88419), + (0x3fcc8c31), + (0x3fc88541), + (0x3fcc8d69)}, + (0xfff80000), + {0x2c,0x79,0x20,0x40,0xe3,0x10,0xb7,0x11,0xcb,0xf4, + 0xba,0x4e,0xee,0x8b,0x5f,0x86,0xa9,0xdb,0x06,0x27,0x00} + }, + { + /* No.197 delta:1583 weight:1233 */ + 11213, + 26, + 20, + 8, + {(0x00000000), + (0x33fe7f24), + (0x88809fc9), + (0xbb7ee0ed), + (0x68600c55), + (0x5b9e7371), + (0xe0e0939c), + (0xd31eecb8), + (0x0000a12e), + (0x33fede0a), + (0x88803ee7), + (0xbb7e41c3), + (0x6860ad7b), + (0x5b9ed25f), + (0xe0e032b2), + (0xd31e4d96)}, + {(0x00000000), + (0x08750800), + (0x82099800), + (0x8a7c9000), + (0x40e2b000), + (0x4897b800), + (0xc2eb2800), + (0xca9e2000), + (0x004a5e00), + (0x083f5600), + (0x8243c600), + (0x8a36ce00), + (0x40a8ee00), + (0x48dde600), + (0xc2a17600), + (0xcad47e00)}, + {(0x3f800000), + (0x3f843a84), + (0x3fc104cc), + (0x3fc53e48), + (0x3fa07158), + (0x3fa44bdc), + (0x3fe17594), + (0x3fe54f10), + (0x3f80252f), + (0x3f841fab), + (0x3fc121e3), + (0x3fc51b67), + (0x3fa05477), + (0x3fa46ef3), + (0x3fe150bb), + (0x3fe56a3f)}, + (0xfff80000), + {0xb0,0x71,0x91,0x64,0xda,0x74,0xb8,0xc6,0x16,0x8c, + 0x5a,0x7e,0x26,0xea,0x75,0x3a,0x0b,0xa4,0xd9,0x97,0x00} + }, + { + /* No.198 delta:1565 weight:813 */ + 11213, + 89, + 7, + 10, + {(0x00000000), + (0x98a83f1e), + (0xcb779281), + (0x53dfad9f), + (0x20400c6a), + (0xb8e83374), + (0xeb379eeb), + (0x739fa1f5), + (0x000092fc), + (0x98a8ade2), + (0xcb77007d), + (0x53df3f63), + (0x20409e96), + (0xb8e8a188), + (0xeb370c17), + (0x739f3309)}, + {(0x00000000), + (0x467a0c00), + (0x04059800), + (0x427f9400), + (0x01093800), + (0x47733400), + (0x050ca000), + (0x4376ac00), + (0xc0e0fe00), + (0x869af200), + (0xc4e56600), + (0x829f6a00), + (0xc1e9c600), + (0x8793ca00), + (0xc5ec5e00), + (0x83965200)}, + {(0x3f800000), + (0x3fa33d06), + (0x3f8202cc), + (0x3fa13fca), + (0x3f80849c), + (0x3fa3b99a), + (0x3f828650), + (0x3fa1bb56), + (0x3fe0707f), + (0x3fc34d79), + (0x3fe272b3), + (0x3fc14fb5), + (0x3fe0f4e3), + (0x3fc3c9e5), + (0x3fe2f62f), + (0x3fc1cb29)}, + (0xfff80000), + {0x32,0x19,0xd6,0x71,0xe2,0x9d,0xf7,0x99,0x34,0xa0, + 0x5f,0x9a,0xcd,0x9f,0x42,0x5a,0x43,0x47,0xee,0xb2,0x00} + }, + { + /* No.199 delta:3921 weight:921 */ + 11213, + 49, + 3, + 15, + {(0x00000000), + (0xbb75ffa8), + (0xa0b33e2a), + (0x1bc6c182), + (0x9a200c78), + (0x2155f3d0), + (0x3a933252), + (0x81e6cdfa), + (0x000015a3), + (0xbb75ea0b), + (0xa0b32b89), + (0x1bc6d421), + (0x9a2019db), + (0x2155e673), + (0x3a9327f1), + (0x81e6d859)}, + {(0x00000000), + (0x45540800), + (0x07840400), + (0x42d00c00), + (0x08200000), + (0x4d740800), + (0x0fa40400), + (0x4af00c00), + (0x02201e00), + (0x47741600), + (0x05a41a00), + (0x40f01200), + (0x0a001e00), + (0x4f541600), + (0x0d841a00), + (0x48d01200)}, + {(0x3f800000), + (0x3fa2aa04), + (0x3f83c202), + (0x3fa16806), + (0x3f841000), + (0x3fa6ba04), + (0x3f87d202), + (0x3fa57806), + (0x3f81100f), + (0x3fa3ba0b), + (0x3f82d20d), + (0x3fa07809), + 
(0x3f85000f), + (0x3fa7aa0b), + (0x3f86c20d), + (0x3fa46809)}, + (0xfff80000), + {0x86,0x1e,0x5b,0xa8,0x8e,0x65,0x6f,0xc0,0xbc,0x92, + 0x81,0x1c,0x6d,0xbd,0x0f,0xd4,0x7e,0x57,0x5f,0x75,0x00} + } +}; + +#endif + + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h new file mode 100644 index 0000000000000000000000000000000000000000..f731101c22504b9cb68c1c5694d6087bc66df18a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_normal_static.h @@ -0,0 +1,134 @@ + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#ifndef CURAND_NORMAL_STATIC_H +#define CURAND_NORMAL_STATIC_H + +#define QUALIFIERS_STATIC __host__ __device__ __forceinline__ + +#include +#if defined(HOST_HAVE_ERFCINVF) + #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) _NV_BLOCK_EXPAND(t) +#else + #define IF_DEVICE_OR_HAVE_ERFCINVF(t, f) NV_IF_ELSE_TARGET(NV_IS_DEVICE, t, f) +#endif + +QUALIFIERS_STATIC float _curand_normal_icdf(unsigned int x) +{ +IF_DEVICE_OR_HAVE_ERFCINVF( + float s = CURAND_SQRT2; + // Mirror to avoid loss of precision + if(x > 0x80000000UL) { + x = 0xffffffffUL - x; + s = -s; + } + float p = x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + // p is in (0, 0.5], 2p is in (0, 1] + return s * erfcinvf(2.0f * p); +, + x++; //suppress warnings + return 0.0f; +) +} + +QUALIFIERS_STATIC float _curand_normal_icdf(unsigned long long x) +{ +IF_DEVICE_OR_HAVE_ERFCINVF( + unsigned int t = (unsigned int)(x >> 32); + float s = CURAND_SQRT2; + // Mirror to avoid loss of precision + if(t > 0x80000000UL) { + t = 0xffffffffUL - t; + s = -s; + } + float p = t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + // p is in (0 - 0.5] 2p is in (0 - 1] + return s * erfcinvf(2.0f * p); +, + x++; + return 0.0f; +) +} + +QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned int x) +{ +IF_DEVICE_OR_HAVE_ERFCINVF( + double s = CURAND_SQRT2_DOUBLE; + // Mirror to avoid loss of precision + if(x > 0x80000000UL) { + x = 0xffffffffUL - x; + s = -s; + } + double p = x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0); + // p is in (0 - 0.5] 2p is in (0 - 1] + return s * erfcinv(2.0 * p); +, + x++; + return 0.0; +) +} + +QUALIFIERS_STATIC double _curand_normal_icdf_double(unsigned long long x) +{ +IF_DEVICE_OR_HAVE_ERFCINVF( + double s = CURAND_SQRT2_DOUBLE; + x >>= 11; + // Mirror to avoid loss of precision + if(x > 0x10000000000000UL) { + x = 0x1fffffffffffffUL - x; + s = -s; + } + double p = x * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0); + // p is in (0 - 0.5] 2p is in (0 - 1] + return s * erfcinv(2.0 * p); +, + x++; + return 0.0; +) +} +#undef QUALIFIERS_STATIC +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h new file mode 100644 index 0000000000000000000000000000000000000000..7881194cb868be14a197b581d7c82bf9dd16d617 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_poisson.h @@ -0,0 +1,763 @@ + + /* Copyright 2010-2014 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. 
THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. + */ + + +#if !defined(CURAND_POISSON_H_) +#define CURAND_POISSON_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#ifndef __CUDACC_RTC__ +#include +#endif // __CUDACC_RTC__ + +#include + +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" + +#define CR_CUDART_PI 3.1415926535897931e+0 +#define CR_CUDART_TWO_TO_52 4503599627370496.0 + + +QUALIFIERS float __cr_rsqrt(float a) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm ("rsqrt.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a)); +, + a = 1.0f / sqrtf (a); +) + return a; +} + +QUALIFIERS float __cr_exp (float a) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + a = a * 1.4426950408889634074; + asm ("ex2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a)); +, + a = expf (a); +) + return a; +} + +QUALIFIERS float __cr_log (float a) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm ("lg2.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a)); + a = a * 0.69314718055994530942; +, + a = logf (a); +) + return a; +} + +QUALIFIERS float __cr_rcp (float a) +{ +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + asm ("rcp.approx.f32.ftz %0, %1;" : "=f"(a) : "f"(a)); +, + a = 1.0f / a; +) + return a; +} + +/* Computes regularized gamma function: gammainc(a,x)/gamma(a) */ +QUALIFIERS float __cr_pgammainc (float a, float x) +{ + float t, alpha, beta; + + /* First level parametrization constants */ + float ma1 = 1.43248035075540910f, + ma2 = 0.12400979329415655f, + ma3 = 0.00025361074907033f, + mb1 = 0.21096734870196546f, + mb2 = 1.97381164089999420f, + mb3 = 0.94201734077887530f; + + /* Second level parametrization constants (depends only on a) */ + + alpha = __cr_rsqrt (a - ma2); + alpha = ma1 * alpha + ma3; + beta = __cr_rsqrt (a - mb2); + beta = mb1 * beta + mb3; + + /* Final approximation (depends on a and x) */ + + t = a - x; + t = alpha * t - beta; + t = 1.0f + __cr_exp (t); + t = t * t; + t = __cr_rcp (t); + + /* Negative a,x or a,x=NAN requires special handling */ + //t = !(x > 0 && a >= 0) ? 
0.0 : t; + + return t; +} + +/* Computes inverse of pgammainc */ +QUALIFIERS float __cr_pgammaincinv (float a, float y) +{ + float t, alpha, beta; + + /* First level parametrization constants */ + + float ma1 = 1.43248035075540910f, + ma2 = 0.12400979329415655f, + ma3 = 0.00025361074907033f, + mb1 = 0.21096734870196546f, + mb2 = 1.97381164089999420f, + mb3 = 0.94201734077887530f; + + /* Second level parametrization constants (depends only on a) */ + + alpha = __cr_rsqrt (a - ma2); + alpha = ma1 * alpha + ma3; + beta = __cr_rsqrt (a - mb2); + beta = mb1 * beta + mb3; + + /* Final approximation (depends on a and y) */ + + t = __cr_rsqrt (y) - 1.0f; + t = __cr_log (t); + t = beta + t; + t = - t * __cr_rcp (alpha) + a; + /* Negative a,x or a,x=NAN requires special handling */ + //t = !(y > 0 && a >= 0) ? 0.0 : t; + return t; +} + +#if defined(__CUDACC_RDC__) && (__cplusplus >= 201703L) && defined(__cpp_inline_variables) +inline __constant__ double __cr_lgamma_table [] = { +#else +static __constant__ double __cr_lgamma_table [] = { +#endif + 0.000000000000000000e-1, + 0.000000000000000000e-1, + 6.931471805599453094e-1, + 1.791759469228055001e0, + 3.178053830347945620e0, + 4.787491742782045994e0, + 6.579251212010100995e0, + 8.525161361065414300e0, + 1.060460290274525023e1 +}; + + +QUALIFIERS double __cr_lgamma_integer(int a) +{ + double s; + double t; + double fa = fabs((float)a); + double sum; + + if (a > 8) { + /* Stirling approximation; coefficients from Hart et al, "Computer + * Approximations", Wiley 1968. Approximation 5404. + */ + s = 1.0 / fa; + t = s * s; + sum = -0.1633436431e-2; + sum = sum * t + 0.83645878922e-3; + sum = sum * t - 0.5951896861197e-3; + sum = sum * t + 0.793650576493454e-3; + sum = sum * t - 0.277777777735865004e-2; + sum = sum * t + 0.833333333333331018375e-1; + sum = sum * s + 0.918938533204672; + s = 0.5 * log (fa); + t = fa - 0.5; + s = s * t; + t = s - fa; + s = s + sum; + t = t + s; + return t; + } else { +NV_IF_ELSE_TARGET(NV_IS_DEVICE, + return __cr_lgamma_table [(int) fa-1]; +, + switch(a) { + case 1: return 0.000000000000000000e-1; + case 2: return 0.000000000000000000e-1; + case 3: return 6.931471805599453094e-1; + case 4: return 1.791759469228055001e0; + case 5: return 3.178053830347945620e0; + case 6: return 4.787491742782045994e0; + case 7: return 6.579251212010100995e0; + case 8: return 8.525161361065414300e0; + default: return 1.060460290274525023e1; + } +) + } +} + +#define KNUTH_FLOAT_CONST 60.0 +template +// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2 +QUALIFIERS unsigned int curand_poisson_knuth(T *state, float lambda) +{ + unsigned int k = 0; + float p = expf(lambda); + do{ + k++; + p *= curand_uniform(state); + }while (p > 1.0); + return k-1; +} + +template +// Donald E. Knuth Seminumerical Algorithms. The Art of Computer Programming, Volume 2 +QUALIFIERS uint4 curand_poisson_knuth4(T *state, float lambda) +{ + uint4 k = {0,0,0,0}; + float exp_lambda = expf(lambda); + float4 p={ exp_lambda,exp_lambda,exp_lambda,exp_lambda }; + do{ + k.x++; + p.x *= curand_uniform(state); + }while (p.x > 1.0); + do{ + k.y++; + p.y *= curand_uniform(state); + }while (p.y > 1.0); + do{ + k.z++; + p.z *= curand_uniform(state); + }while (p.z > 1.0); + do{ + k.w++; + p.w *= curand_uniform(state); + }while (p.w > 1.0); + + k.x--; + k.y--; + k.z--; + k.w--; + return k; +} + +template +// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram. 
+QUALIFIERS unsigned int _curand_M2_double(T x, curandDistributionM2Shift_t distributionM2) +{ + double u = _curand_uniform_double(x); + int j = (int) floor(distributionM2->length*u); + + double histogramVj; + unsigned int histogramKj; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35, + histogramVj = __ldg( &(distributionM2->histogram->V[j])); + histogramKj = __ldg( &(distributionM2->histogram->K[j])); +, + histogramVj = distributionM2->histogram->V[j]; + histogramKj = distributionM2->histogram->K[j]; +) + //if (u < distributionM2->histogram->V[j]) return distributionM2->shift + j; + //return distributionM2->shift + distributionM2->histogram->K[j]; + if (u < histogramVj) return distributionM2->shift + j; + return distributionM2->shift + histogramKj; +} + +template +// Marsaglia, Tsang, Wang Journal of Statistical Software, square histogram. +QUALIFIERS uint4 _curand_M2_double4(T x, curandDistributionM2Shift_t distributionM2) +{ + double4 u; + uint4 result = {0,0,0,0}; + int4 flag = {1,1,1,1}; + + u.x = _curand_uniform_double(x.x); + u.y = _curand_uniform_double(x.y); + u.z = _curand_uniform_double(x.z); + u.w = _curand_uniform_double(x.w); + + int4 j; + j.x = (int) floor(distributionM2->length*u.x); + j.y = (int) floor(distributionM2->length*u.y); + j.z = (int) floor(distributionM2->length*u.z); + j.w = (int) floor(distributionM2->length*u.w); +// int result; + + double histogramVjx; + double histogramVjy; + double histogramVjz; + double histogramVjw; + unsigned int histogramKjx; + unsigned int histogramKjy; + unsigned int histogramKjz; + unsigned int histogramKjw; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35, + histogramVjx = __ldg( &(distributionM2->histogram->V[j.x])); + histogramVjy = __ldg( &(distributionM2->histogram->V[j.y])); + histogramVjz = __ldg( &(distributionM2->histogram->V[j.z])); + histogramVjw = __ldg( &(distributionM2->histogram->V[j.w])); + + histogramKjx = __ldg( &(distributionM2->histogram->K[j.x])); + histogramKjy = __ldg( &(distributionM2->histogram->K[j.y])); + histogramKjz = __ldg( &(distributionM2->histogram->K[j.z])); + histogramKjw = __ldg( &(distributionM2->histogram->K[j.w])); +, + histogramVjx = distributionM2->histogram->V[j.x]; + histogramVjy = distributionM2->histogram->V[j.y]; + histogramVjz = distributionM2->histogram->V[j.z]; + histogramVjw = distributionM2->histogram->V[j.w]; + + histogramKjx = distributionM2->histogram->K[j.x]; + histogramKjy = distributionM2->histogram->K[j.y]; + histogramKjz = distributionM2->histogram->K[j.z]; + histogramKjw = distributionM2->histogram->K[j.w]; +) + + if (u.x < histogramVjx){ result.x = distributionM2->shift + j.x; flag.x = 0; } + if (u.y < histogramVjy){ result.y = distributionM2->shift + j.y; flag.y = 0; } + if (u.z < histogramVjz){ result.z = distributionM2->shift + j.z; flag.z = 0; } + if (u.w < histogramVjw){ result.w = distributionM2->shift + j.w; flag.w = 0; } + //return distributionM2->shift + distributionM2->histogram->K[j]; + + if(flag.x) result.x = distributionM2->shift + histogramKjx; + if(flag.y) result.y = distributionM2->shift + histogramKjy; + if(flag.z) result.z = distributionM2->shift + histogramKjz; + if(flag.w) result.w = distributionM2->shift + histogramKjw; + + return result; +} + +template +QUALIFIERS unsigned int curand_M2_double(STATE *state, curandDistributionM2Shift_t distributionM2) +{ + return _curand_M2_double(curand(state), distributionM2); +} + +template +QUALIFIERS uint4 curand_M2_double4(STATE *state, curandDistributionM2Shift_t distributionM2) +{ + return _curand_M2_double4(curand4(state), 
distributionM2); +} + + +template +QUALIFIERS unsigned int _curand_binary_search_double(T x, curandDistributionShift_t distribution) +{ + double u = _curand_uniform_double(x); + int min = 0; + int max = distribution->length-1; + do{ + int mid = (max + min)/2; + double probability_mid; +NV_IF_ELSE_TARGET(NV_PROVIDES_SM_35, + probability_mid = __ldg( &(distribution->probability[mid])); +, + probability_mid = distribution->probability[mid]; +) + if (u <= probability_mid){ + max = mid; + }else{ + min = mid+1; + } + }while (min < max); + return distribution->shift + min; +} + +template +QUALIFIERS unsigned int curand_binary_search_double(STATE *state, curandDistributionShift_t distribution) +{ + return _curand_binary_search_double(curand(state), distribution); +} + +// Generates uniformly distributed double values in range (0.0; 1.0) from uniformly distributed +// unsigned int. We can't use standard _curand_uniform_double since it can generate 1.0. +// This is required only for _curand_poisson_ITR_double. +QUALIFIERS double _curand_uniform_double_excluding_one(unsigned int x) +{ + return x * CURAND_2POW32_INV_DOUBLE + (CURAND_2POW32_INV_DOUBLE/2.0); +} + +// Overload for unsigned long long. +// This is required only for _curand_poisson_ITR_double. +QUALIFIERS double _curand_uniform_double_excluding_one(unsigned long long x) +{ + return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/4.0); +} + +#define MAGIC_DOUBLE_CONST 500.0 +template +//George S. Fishman Discrete-event simulation: modeling, programming, and analysis +QUALIFIERS unsigned int _curand_poisson_ITR_double(T x, double lambda) +{ + double L,p = 1.0; + double q = 1.0; + unsigned int k = 0; + int pow=0; + // This algorithm requires u to be in (0;1) range, however, _curand_uniform_double + // returns a number in range (0;1]. If u is 1.0 the inner loop never ends. The + // following operation transforms the range from (0;1] to (0;1). 
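// [Editorial worked example - not part of the original header.] To make the
// remapping described above concrete: for the extreme 32-bit draw x = 0xffffffff,
// _curand_uniform_double(x) = (2^32 - 1) * 2^-32 + 2^-32 = 1.0 exactly, which
// would keep the inner acceptance loop below from terminating, whereas
// _curand_uniform_double_excluding_one(x) = (2^32 - 1) * 2^-32 + 2^-33
// = 1.0 - 2^-33, strictly less than 1.0, so the loop always exits.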
+ double u = _curand_uniform_double_excluding_one(x); + do{ + if (lambda > (double)(pow+MAGIC_DOUBLE_CONST)){ + L = exp(-MAGIC_DOUBLE_CONST); + }else{ + L = exp((double)(pow - lambda)); + } + p *= L; + q *= L; + pow += (int) MAGIC_DOUBLE_CONST; + while (u > q){ + k++; + p *= ((double)lambda / (double) k); + q += p; + } + }while((double)pow < lambda); + return k; +} + +template +/* Rejection Method for Poisson distribution based on gammainc approximation */ +QUALIFIERS unsigned int curand_poisson_gammainc(T state, float lambda){ + float y, x, t, z,v; + float logl = __cr_log (lambda); + while (true) { + y = curand_uniform (state); + x = __cr_pgammaincinv (lambda, y); + x = floorf (x); + z = curand_uniform (state); + v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f; + z = z*v; + t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x))); + if ((z < t) && (v>=1e-20)) + break; + } + return (unsigned int)x; +} + +template +/* Rejection Method for Poisson distribution based on gammainc approximation */ +QUALIFIERS uint4 curand_poisson_gammainc4(T state, float lambda){ + uint4 result; + float y, x, t, z,v; + float logl = __cr_log (lambda); + while (true) { + y = curand_uniform(state); + x = __cr_pgammaincinv (lambda, y); + x = floorf (x); + z = curand_uniform (state); + v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f; + z = z*v; + t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x))); + if ((z < t) && (v>=1e-20)) + break; + } + result.x = (unsigned int)x; + + while (true) { + y = curand_uniform(state); + x = __cr_pgammaincinv (lambda, y); + x = floorf (x); + z = curand_uniform (state); + v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f; + z = z*v; + t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x))); + if ((z < t) && (v>=1e-20)) + break; + } + result.y = (unsigned int)x; + + while (true) { + y = curand_uniform(state); + x = __cr_pgammaincinv (lambda, y); + x = floorf (x); + z = curand_uniform (state); + v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f; + z = z*v; + t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x))); + if ((z < t) && (v>=1e-20)) + break; + } + result.z = (unsigned int)x; + + while (true) { + y = curand_uniform(state); + x = __cr_pgammaincinv (lambda, y); + x = floorf (x); + z = curand_uniform (state); + v = (__cr_pgammainc (lambda, x + 1.0f) - __cr_pgammainc (lambda, x)) * 1.3f; + z = z*v; + t = (float)__cr_exp (-lambda + x * logl - (float)__cr_lgamma_integer ((int)(1.0f + x))); + if ((z < t) && (v>=1e-20)) + break; + } + result.w = (unsigned int)x; + + return result; +} +// Note below that the round to nearest integer, where needed,is done in line with code that +// assumes the range of values is < 2**32 + +template +QUALIFIERS unsigned int _curand_poisson(T x, double lambda) +{ + if (lambda < 1000) + return _curand_poisson_ITR_double(x, lambda); + return (unsigned int)((sqrt(lambda) * _curand_normal_icdf_double(x)) + lambda + 0.5); //Round to nearest +} + +template +QUALIFIERS unsigned int _curand_poisson_from_normal(T x, double lambda) +{ + return (unsigned int)((sqrt(lambda) * _curand_normal_icdf(x)) + lambda + 0.5); //Round to nearest +} + +template +QUALIFIERS unsigned int curand_poisson_from_normal(STATE state, double lambda) +{ + return (unsigned int)((sqrt(lambda) * curand_normal(state)) + lambda + 0.5); //Round to nearest +} + 
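Editorial note: the helpers above (Knuth's product method, the ITR/normal-approximation dispatch in _curand_poisson, and the gammainc rejection sampler) are surfaced through the curand_poisson() device overloads documented further down in this header. As a quick orientation, here is a minimal sketch, not part of the diff, of calling the XORWOW overload from a kernel; the kernel name, seed, and launch shape are placeholder assumptions, and <curand_kernel.h> is assumed to provide curand_init() and to pull in this header.

#include <curand_kernel.h>

// Each thread initializes its own XORWOW state and draws one Poisson(lambda)
// sample; curand_poisson() chooses between Knuth, gammainc rejection, and the
// normal approximation internally depending on lambda, as implemented in this
// header.
__global__ void poisson_sample(unsigned int *out, double lambda, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;

    curandStateXORWOW_t state;
    curand_init(1234ULL /*seed*/, id /*subsequence*/, 0 /*offset*/, &state);
    out[id] = curand_poisson(&state, lambda);
}

// Host side (illustrative launch only):
//   poisson_sample<<<(n + 255) / 256, 256>>>(d_out, 50.0, n);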
+template +QUALIFIERS uint4 curand_poisson_from_normal4(STATE state, double lambda) +{ + uint4 result; + float4 _res; + + _res = curand_normal4(state); + + result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest + result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest + result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest + result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest + return result; //Round to nearest +} + +/** + * \brief Return a Poisson-distributed unsigned int from a XORWOW generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the XORWOW generator in \p state, + * increment the position of the generator by a variable amount, depending + * on the algorithm used. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateXORWOW_t *state, double lambda) +{ + if (lambda < 64) + return curand_poisson_knuth(state, (float)lambda); + if (lambda > 4000) + return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest + return curand_poisson_gammainc(state, (float)lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a Philox4_32_10 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state, + * increment the position of the generator by a variable amount, depending + * on the algorithm used. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStatePhilox4_32_10_t *state, double lambda) +{ + if (lambda < 64) + return curand_poisson_knuth(state, (float)lambda); + if (lambda > 4000) + return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest + return curand_poisson_gammainc(state, (float)lambda); +} +/** + * \brief Return four Poisson-distributed unsigned ints from a Philox4_32_10 generator. + * + * Return a four unsigned ints from a Poisson + * distribution with lambda \p lambda from the Philox4_32_10 generator in \p state, + * increment the position of the generator by a variable amount, depending + * on the algorithm used. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS uint4 curand_poisson4(curandStatePhilox4_32_10_t *state, double lambda) +{ + uint4 result; + double4 _res; + if (lambda < 64) + return curand_poisson_knuth4(state, (float)lambda); + if (lambda > 4000) { + _res = curand_normal4_double(state); + result.x = (unsigned int)((sqrt(lambda) * _res.x) + lambda + 0.5); //Round to nearest + result.y = (unsigned int)((sqrt(lambda) * _res.y) + lambda + 0.5); //Round to nearest + result.z = (unsigned int)((sqrt(lambda) * _res.z) + lambda + 0.5); //Round to nearest + result.w = (unsigned int)((sqrt(lambda) * _res.w) + lambda + 0.5); //Round to nearest + return result; + } + return curand_poisson_gammainc4(state, (float)lambda); +} + + + +/** + * \brief Return a Poisson-distributed unsigned int from a MRG32k3A generator. 
+ * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the MRG32k3a generator in \p state, + * increment the position of the generator by a variable amount, depending + * on the algorithm used. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateMRG32k3a_t *state, double lambda) +{ + if (lambda < 64) + return curand_poisson_knuth(state, (float)lambda); + if (lambda > 4000) + return (unsigned int)((sqrt(lambda) * curand_normal_double(state)) + lambda + 0.5); //Round to nearest + return curand_poisson_gammainc(state, (float)lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a MTGP32 generator. + * + * Return a single int from a Poisson + * distribution with lambda \p lambda from the MTGP32 generator in \p state, + * increment the position of the generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateMtgp32_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a Sobol32 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the Sobol32 generator in \p state, + * increment the position of the generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ + +QUALIFIERS unsigned int curand_poisson(curandStateSobol32_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol32 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the scrambled Sobol32 generator in \p state, + * increment the position of the generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol32_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a Sobol64 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the Sobol64 generator in \p state, + * increment position of generator by one. + * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateSobol64_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} + +/** + * \brief Return a Poisson-distributed unsigned int from a scrambled Sobol64 generator. + * + * Return a single unsigned int from a Poisson + * distribution with lambda \p lambda from the scrambled Sobol64 generator in \p state, + * increment position of generator by one. 
+ * + * \param state - Pointer to state to update + * \param lambda - Lambda of the Poisson distribution + * + * \return Poisson-distributed unsigned int with lambda \p lambda + */ +QUALIFIERS unsigned int curand_poisson(curandStateScrambledSobol64_t *state, double lambda) +{ + return _curand_poisson(curand(state), lambda); +} +#endif // !defined(CURAND_POISSON_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h new file mode 100644 index 0000000000000000000000000000000000000000..7a4af8afa328c186d9ea33a8c8226e19aba4793e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/include/curand_uniform.h @@ -0,0 +1,498 @@ + + /* Copyright 2010-2018 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * The source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * The Licensed Deliverables contained herein are PROPRIETARY and + * CONFIDENTIAL to NVIDIA and are being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ + + +#if !defined(CURAND_UNIFORM_H_) +#define CURAND_UNIFORM_H_ + +/** + * \defgroup DEVICE Device API + * + * @{ + */ + +#ifndef __CUDACC_RTC__ +#include +#endif // __CUDACC_RTC__ + +#include "curand_mrg32k3a.h" +#include "curand_mtgp32_kernel.h" +#include "curand_philox4x32_x.h" + + +QUALIFIERS float _curand_uniform(unsigned int x) +{ + return x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); +} + +QUALIFIERS float4 _curand_uniform4(uint4 x) +{ + float4 y; + y.x = x.x * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + y.y = x.y * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + y.z = x.z * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + y.w = x.w * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); + return y; +} + +QUALIFIERS float _curand_uniform(unsigned long long x) +{ + unsigned int t; + t = (unsigned int)(x >> 32); + return t * CURAND_2POW32_INV + (CURAND_2POW32_INV/2.0f); +} + +QUALIFIERS double _curand_uniform_double(unsigned int x) +{ + return x * CURAND_2POW32_INV_DOUBLE + CURAND_2POW32_INV_DOUBLE; +} + +QUALIFIERS double _curand_uniform_double(unsigned long long x) +{ + return (x >> 11) * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0); +} + +QUALIFIERS double _curand_uniform_double_hq(unsigned int x, unsigned int y) +{ + unsigned long long z = (unsigned long long)x ^ + ((unsigned long long)y << (53 - 32)); + return z * CURAND_2POW53_INV_DOUBLE + (CURAND_2POW53_INV_DOUBLE/2.0); +} + +QUALIFIERS float curand_uniform(curandStateTest_t *state) +{ + return _curand_uniform(curand(state)); +} + +QUALIFIERS double curand_uniform_double(curandStateTest_t *state) +{ + return _curand_uniform_double(curand(state)); +} + +/** + * \brief Return a uniformly distributed float from an XORWOW generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the XORWOW generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation may use any number of calls to \p curand() to + * get enough random bits to create the return value. The current + * implementation uses one call. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateXORWOW_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from an XORWOW generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the XORWOW generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation may use any number of calls to \p curand() to + * get enough random bits to create the return value. The current + * implementation uses exactly two calls. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateXORWOW_t *state) +{ + unsigned int x, y; + x = curand(state); + y = curand(state); + return _curand_uniform_double_hq(x, y); +} +/** + * \brief Return a uniformly distributed float from an MRG32k3a generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the MRG32k3a generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. 
Denormalized floating + * point outputs are never returned. + * + * The implementation returns up to 23 bits of mantissa, with the minimum + * return value \f$ 2^{-32} \f$ + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateMRG32k3a_t *state) +{ + return ((float)(curand_MRG32k3a(state)*MRG32K3A_NORM)); +} + +/** + * \brief Return a uniformly distributed double from an MRG32k3a generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the MRG32k3a generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * Note the implementation returns at most 32 random bits of mantissa as + * outlined in the seminal paper by L'Ecuyer. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateMRG32k3a_t *state) +{ + return curand_MRG32k3a(state)*MRG32K3A_NORM; +} + + + +/** + * \brief Return a uniformly distributed tuple of 2 doubles from an Philox4_32_10 generator. + * + * Return a uniformly distributed 2 doubles (double4) between \p 0.0 and \p 1.0 + * from the Philox4_32_10 generator in \p state, increment position of generator by 4. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * \param state - Pointer to state to update + * + * \return 2 uniformly distributed doubles between \p 0.0 and \p 1.0 + */ + +QUALIFIERS double2 curand_uniform2_double(curandStatePhilox4_32_10_t *state) +{ + uint4 _x; + double2 result; + _x = curand4(state); + result.x = _curand_uniform_double_hq(_x.x,_x.y); + result.y = _curand_uniform_double_hq(_x.z,_x.w); + return result; +} + + +// not a part of API +QUALIFIERS double4 curand_uniform4_double(curandStatePhilox4_32_10_t *state) +{ + uint4 _x, _y; + double4 result; + _x = curand4(state); + _y = curand4(state); + result.x = _curand_uniform_double_hq(_x.x,_x.y); + result.y = _curand_uniform_double_hq(_x.z,_x.w); + result.z = _curand_uniform_double_hq(_y.x,_y.y); + result.w = _curand_uniform_double_hq(_y.z,_y.w); + return result; +} + +/** + * \brief Return a uniformly distributed float from a Philox4_32_10 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the Philox4_32_10 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0 and \p 1.0 + * + */ +QUALIFIERS float curand_uniform(curandStatePhilox4_32_10_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed tuple of 4 floats from a Philox4_32_10 generator. + * + * Return a uniformly distributed 4 floats between \p 0.0f and \p 1.0f + * from the Philox4_32_10 generator in \p state, increment position of generator by 4. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. 
+ * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0 and \p 1.0 + * + */ +QUALIFIERS float4 curand_uniform4(curandStatePhilox4_32_10_t *state) +{ + return _curand_uniform4(curand4(state)); +} + +/** + * \brief Return a uniformly distributed float from a MTGP32 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the MTGP32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateMtgp32_t *state) +{ + return _curand_uniform(curand(state)); +} +/** + * \brief Return a uniformly distributed double from a MTGP32 generator. + * + * Return a uniformly distributed double between \p 0.0f and \p 1.0f + * from the MTGP32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * Note that the implementation uses only 32 random bits to generate a single double + * precision value. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0f and \p 1.0f + */ +QUALIFIERS double curand_uniform_double(curandStateMtgp32_t *state) +{ + return _curand_uniform_double(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a Philox4_32_10 generator. + * + * Return a uniformly distributed double between \p 0.0f and \p 1.0f + * from the Philox4_32_10 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * Note that the implementation uses only 32 random bits to generate a single double + * precision value. + * + * \p curand_uniform2_double() is recommended for higher quality uniformly distributed + * double precision values. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0f and \p 1.0f + */ + +QUALIFIERS double curand_uniform_double(curandStatePhilox4_32_10_t *state) +{ + return _curand_uniform_double(curand(state)); +} + + +/** + * \brief Return a uniformly distributed float from a Sobol32 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the Sobol32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateSobol32_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a Sobol32 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the Sobol32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. 
+ * + * Note that the implementation uses only 32 random bits to generate a single double + * precision value. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateSobol32_t *state) +{ + return _curand_uniform_double(curand(state)); +} +/** + * \brief Return a uniformly distributed float from a scrambled Sobol32 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the scrambled Sobol32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateScrambledSobol32_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a scrambled Sobol32 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the scrambled Sobol32 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. + * + * Note that the implementation uses only 32 random bits to generate a single double + * precision value. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateScrambledSobol32_t *state) +{ + return _curand_uniform_double(curand(state)); +} +/** + * \brief Return a uniformly distributed float from a Sobol64 generator. + * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateSobol64_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a Sobol64 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateSobol64_t *state) +{ + return _curand_uniform_double(curand(state)); +} +/** + * \brief Return a uniformly distributed float from a scrambled Sobol64 generator. 
+ * + * Return a uniformly distributed float between \p 0.0f and \p 1.0f + * from the scrambled Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0f but includes \p 1.0f. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand(). + * + * \param state - Pointer to state to update + * + * \return uniformly distributed float between \p 0.0f and \p 1.0f + */ +QUALIFIERS float curand_uniform(curandStateScrambledSobol64_t *state) +{ + return _curand_uniform(curand(state)); +} + +/** + * \brief Return a uniformly distributed double from a scrambled Sobol64 generator. + * + * Return a uniformly distributed double between \p 0.0 and \p 1.0 + * from the scrambled Sobol64 generator in \p state, increment position of generator. + * Output range excludes \p 0.0 but includes \p 1.0. Denormalized floating + * point outputs are never returned. + * + * The implementation is guaranteed to use a single call to \p curand() + * to preserve the quasirandom properties of the sequence. + * + * \param state - Pointer to state to update + * + * \return uniformly distributed double between \p 0.0 and \p 1.0 + */ +QUALIFIERS double curand_uniform_double(curandStateScrambledSobol64_t *state) +{ + return _curand_uniform_double(curand(state)); +} + +#endif // !defined(CURAND_UNIFORM_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e9df00d580648f715db83eac483383d598835ae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd9adf0ec76c389155281a6e316b30e10b809c5f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e8d788a4ca3d270d74e2d2ad78212324fd65f9f Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse.h b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse.h new file mode 100644 index 0000000000000000000000000000000000000000..a4df07f130786b46049fd4becc5cde3758e40512 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse.h @@ -0,0 +1,5811 @@ +/* + * Copyright 1993-2023 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#if !defined(CUSPARSE_H_) +#define CUSPARSE_H_ + +#include // cuComplex +#include // cudaStream_t +#include // CUDA_R_32F +#include // int64_t +#include // FILE* + +#if defined(__cplusplus) +# include // __half +#endif // defined(__cplusplus) + +//############################################################################## +//# CUSPARSE VERSION INFORMATION +//############################################################################## + +#define CUSPARSE_VER_MAJOR 12 +#define CUSPARSE_VER_MINOR 1 +#define CUSPARSE_VER_PATCH 0 +#define CUSPARSE_VER_BUILD 106 +#define CUSPARSE_VERSION (CUSPARSE_VER_MAJOR * 1000 + \ + CUSPARSE_VER_MINOR * 100 + \ + CUSPARSE_VER_PATCH) + +// ############################################################################# +// # BASIC MACROS +// ############################################################################# + +#if !defined(CUSPARSEAPI) +# if defined(_WIN32) +# define CUSPARSEAPI __stdcall +# else +# define CUSPARSEAPI +# endif +#endif + +//------------------------------------------------------------------------------ + +#if !defined(_MSC_VER) +# define CUSPARSE_CPP_VERSION __cplusplus +#elif _MSC_FULL_VER >= 190024210 // Visual Studio 2015 Update 3 +# define CUSPARSE_CPP_VERSION _MSVC_LANG +#else +# define CUSPARSE_CPP_VERSION 0 +#endif + +// ############################################################################# +// # CUSPARSE_DEPRECATED MACRO +// ############################################################################# + +#if !defined(DISABLE_CUSPARSE_DEPRECATED) + +# if CUSPARSE_CPP_VERSION >= 201402L + +# define CUSPARSE_DEPRECATED(new_func) \ + [[deprecated("please use " #new_func " instead")]] + +# elif defined(_MSC_VER) + +# define CUSPARSE_DEPRECATED(new_func) \ + __declspec(deprecated("please use " #new_func " instead")) + +# elif defined(__INTEL_COMPILER) || defined(__clang__) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) + +# define CUSPARSE_DEPRECATED(new_func) \ + __attribute__((deprecated("please use " #new_func " instead"))) + +# elif defined(__GNUC__) || defined(__xlc__) + +# define CUSPARSE_DEPRECATED(new_func) \ + __attribute__((deprecated)) + +# else + +# define CUSPARSE_DEPRECATED(new_func) + +# endif // defined(__cplusplus) && __cplusplus >= 201402L +//------------------------------------------------------------------------------ + +# if CUSPARSE_CPP_VERSION >= 201703L + +# define CUSPARSE_DEPRECATED_ENUM(new_enum) \ + [[deprecated("please use " #new_enum " instead")]] + +# elif defined(__clang__) || \ + (defined(__GNUC__) && __GNUC__ >= 6 && !defined(__PGI)) + +# define CUSPARSE_DEPRECATED_ENUM(new_enum) \ + __attribute__((deprecated("please use " #new_enum " instead"))) + +# else + +# define CUSPARSE_DEPRECATED_ENUM(new_enum) + +# endif // defined(__cplusplus) && __cplusplus >= 201402L + +#else // defined(DISABLE_CUSPARSE_DEPRECATED) + +# define CUSPARSE_DEPRECATED(new_func) +# define CUSPARSE_DEPRECATED_ENUM(new_enum) + +#endif // !defined(DISABLE_CUSPARSE_DEPRECATED) + +#undef CUSPARSE_CPP_VERSION + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) +extern "C" { +#endif // defined(__cplusplus) + +//############################################################################## +//# OPAQUE DATA STRUCTURES +//############################################################################## + +struct cusparseContext; +typedef struct cusparseContext* cusparseHandle_t; + +struct cusparseMatDescr; +typedef struct 
cusparseMatDescr* cusparseMatDescr_t; + +struct bsrsv2Info; +typedef struct bsrsv2Info* bsrsv2Info_t; + +struct bsrsm2Info; +typedef struct bsrsm2Info* bsrsm2Info_t; + +struct csric02Info; +typedef struct csric02Info* csric02Info_t; + +struct bsric02Info; +typedef struct bsric02Info* bsric02Info_t; + +struct csrilu02Info; +typedef struct csrilu02Info* csrilu02Info_t; + +struct bsrilu02Info; +typedef struct bsrilu02Info* bsrilu02Info_t; + +struct csru2csrInfo; +typedef struct csru2csrInfo* csru2csrInfo_t; + +struct cusparseColorInfo; +typedef struct cusparseColorInfo* cusparseColorInfo_t; + +struct pruneInfo; +typedef struct pruneInfo* pruneInfo_t; + +//############################################################################## +//# ENUMERATORS +//############################################################################## + +typedef enum { + CUSPARSE_STATUS_SUCCESS = 0, + CUSPARSE_STATUS_NOT_INITIALIZED = 1, + CUSPARSE_STATUS_ALLOC_FAILED = 2, + CUSPARSE_STATUS_INVALID_VALUE = 3, + CUSPARSE_STATUS_ARCH_MISMATCH = 4, + CUSPARSE_STATUS_MAPPING_ERROR = 5, + CUSPARSE_STATUS_EXECUTION_FAILED = 6, + CUSPARSE_STATUS_INTERNAL_ERROR = 7, + CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED = 8, + CUSPARSE_STATUS_ZERO_PIVOT = 9, + CUSPARSE_STATUS_NOT_SUPPORTED = 10, + CUSPARSE_STATUS_INSUFFICIENT_RESOURCES = 11 +} cusparseStatus_t; + +typedef enum { + CUSPARSE_POINTER_MODE_HOST = 0, + CUSPARSE_POINTER_MODE_DEVICE = 1 +} cusparsePointerMode_t; + +typedef enum { + CUSPARSE_ACTION_SYMBOLIC = 0, + CUSPARSE_ACTION_NUMERIC = 1 +} cusparseAction_t; + +typedef enum { + CUSPARSE_MATRIX_TYPE_GENERAL = 0, + CUSPARSE_MATRIX_TYPE_SYMMETRIC = 1, + CUSPARSE_MATRIX_TYPE_HERMITIAN = 2, + CUSPARSE_MATRIX_TYPE_TRIANGULAR = 3 +} cusparseMatrixType_t; + +typedef enum { + CUSPARSE_FILL_MODE_LOWER = 0, + CUSPARSE_FILL_MODE_UPPER = 1 +} cusparseFillMode_t; + +typedef enum { + CUSPARSE_DIAG_TYPE_NON_UNIT = 0, + CUSPARSE_DIAG_TYPE_UNIT = 1 +} cusparseDiagType_t; + +typedef enum { + CUSPARSE_INDEX_BASE_ZERO = 0, + CUSPARSE_INDEX_BASE_ONE = 1 +} cusparseIndexBase_t; + +typedef enum { + CUSPARSE_OPERATION_NON_TRANSPOSE = 0, + CUSPARSE_OPERATION_TRANSPOSE = 1, + CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE = 2 +} cusparseOperation_t; + +typedef enum { + CUSPARSE_DIRECTION_ROW = 0, + CUSPARSE_DIRECTION_COLUMN = 1 +} cusparseDirection_t; + +typedef enum { + CUSPARSE_SOLVE_POLICY_NO_LEVEL = 0, + CUSPARSE_SOLVE_POLICY_USE_LEVEL = 1 +} cusparseSolvePolicy_t; + +typedef enum { + CUSPARSE_COLOR_ALG0 = 0, // default + CUSPARSE_COLOR_ALG1 = 1 +} cusparseColorAlg_t; + +//############################################################################## +//# INITIALIZATION AND MANAGEMENT ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseCreate(cusparseHandle_t* handle); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroy(cusparseHandle_t handle); + +cusparseStatus_t CUSPARSEAPI +cusparseGetVersion(cusparseHandle_t handle, + int* version); + +cusparseStatus_t CUSPARSEAPI +cusparseGetProperty(libraryPropertyType type, + int* value); + +const char* CUSPARSEAPI +cusparseGetErrorName(cusparseStatus_t status); + +const char* CUSPARSEAPI +cusparseGetErrorString(cusparseStatus_t status); + +cusparseStatus_t CUSPARSEAPI +cusparseSetStream(cusparseHandle_t handle, + cudaStream_t streamId); + +cusparseStatus_t CUSPARSEAPI +cusparseGetStream(cusparseHandle_t handle, + cudaStream_t* streamId); + +cusparseStatus_t CUSPARSEAPI +cusparseGetPointerMode(cusparseHandle_t handle, + 
cusparsePointerMode_t* mode); + +cusparseStatus_t CUSPARSEAPI +cusparseSetPointerMode(cusparseHandle_t handle, + cusparsePointerMode_t mode); + +//############################################################################## +//# LOGGING APIs +//############################################################################## + +typedef void (*cusparseLoggerCallback_t)(int logLevel, + const char* functionName, + const char* message); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetCallback(cusparseLoggerCallback_t callback); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetFile(FILE* file); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerOpenFile(const char* logFile); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetLevel(int level); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerSetMask(int mask); + +cusparseStatus_t CUSPARSEAPI +cusparseLoggerForceDisable(void); + +//############################################################################## +//# HELPER ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseCreateMatDescr(cusparseMatDescr_t* descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyMatDescr(cusparseMatDescr_t descrA); + +//cusparseStatus_t CUSPARSEAPI +//cusparseCopyMatDescr(cusparseMatDescr_t dest, +// const cusparseMatDescr_t src); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatType(cusparseMatDescr_t descrA, + cusparseMatrixType_t type); + +cusparseMatrixType_t CUSPARSEAPI +cusparseGetMatType(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatFillMode(cusparseMatDescr_t descrA, + cusparseFillMode_t fillMode); + +cusparseFillMode_t CUSPARSEAPI +cusparseGetMatFillMode(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatDiagType(cusparseMatDescr_t descrA, + cusparseDiagType_t diagType); + +cusparseDiagType_t CUSPARSEAPI +cusparseGetMatDiagType(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseSetMatIndexBase(cusparseMatDescr_t descrA, + cusparseIndexBase_t base); + +cusparseIndexBase_t CUSPARSEAPI +cusparseGetMatIndexBase(const cusparseMatDescr_t descrA); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsric02Info(csric02Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsric02Info(csric02Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsric02Info(bsric02Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsric02Info(bsric02Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsrilu02Info(csrilu02Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsrilu02Info(csrilu02Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrilu02Info(bsrilu02Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrilu02Info(bsrilu02Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrsv2Info(bsrsv2Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrsv2Info(bsrsv2Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsrsm2Info(bsrsm2Info_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyBsrsm2Info(bsrsm2Info_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsru2csrInfo(csru2csrInfo_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyCsru2csrInfo(csru2csrInfo_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateColorInfo(cusparseColorInfo_t* info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyColorInfo(cusparseColorInfo_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCreatePruneInfo(pruneInfo_t* 
info); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyPruneInfo(pruneInfo_t info); + +//############################################################################## +//# SPARSE LEVEL 2 ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const float* alpha, + const float* A, + int lda, + int nnz, + const float* xVal, + const int* xInd, + const float* beta, + float* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const double* alpha, + const double* A, + int lda, + int nnz, + const double* xVal, + const int* xInd, + const double* beta, + double* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const cuComplex* alpha, + const cuComplex* A, + int lda, + int nnz, + const cuComplex* xVal, + const int* xInd, + const cuComplex* beta, + cuComplex* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgemvi(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + const cuDoubleComplex* alpha, + const cuDoubleComplex* A, + int lda, + int nnz, + const cuDoubleComplex* xVal, + const int* xInd, + const cuDoubleComplex* beta, + cuDoubleComplex* y, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgemvi_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int n, + int nnz, + int* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const float* x, + const float* beta, + float* y); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const double* x, + const double* beta, + double* y); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuComplex* x, + const cuComplex* beta, + cuComplex* y); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nb, + int 
nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuDoubleComplex* x, + const cuDoubleComplex* beta, + cuDoubleComplex* y); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const float* x, + const float* beta, + float* y); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const double* x, + const double* beta, + double* y); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuComplex* x, + const cuComplex* beta, + cuComplex* y); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrxmv(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int sizeOfMask, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedMaskPtrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedEndPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cuDoubleComplex* x, + const cuDoubleComplex* beta, + cuDoubleComplex* y); + +cusparseStatus_t CUSPARSEAPI +cusparseXbsrsv2_zeroPivot(cusparseHandle_t handle, + bsrsv2Info_t info, + int* position); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const 
cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockSize, + bsrsv2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const float* f, + float* x, + cusparseSolvePolicy_t policy, + void* 
pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const double* f, + double* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const cuComplex* f, + cuComplex* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsv2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + int mb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + bsrsv2Info_t info, + const cuDoubleComplex* f, + cuDoubleComplex* x, + cusparseSolvePolicy_t policy, + void* pBuffer); + +//############################################################################## +//# SPARSE LEVEL 3 ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const float* B, + const int ldb, + const float* beta, + float* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const double* B, + const int ldb, + const double* beta, + double* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const cuComplex* B, + const int ldb, + const cuComplex* beta, + cuComplex* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI + cusparseZbsrmm(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int kb, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + const int blockSize, + const cuDoubleComplex* B, + const int ldb, + const cuDoubleComplex* beta, + cuDoubleComplex* C, + int ldc); + +cusparseStatus_t CUSPARSEAPI +cusparseXbsrsm2_zeroPivot(cusparseHandle_t handle, + bsrsm2Info_t info, + int* 
position); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transB, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* 
pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const float* alpha, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const float* B, + int ldb, + float* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const double* alpha, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const double* B, + int ldb, + double* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const cuComplex* B, + int ldb, + cuComplex* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrsm2_solve(cusparseHandle_t handle, + cusparseDirection_t dirA, + cusparseOperation_t transA, + cusparseOperation_t transXY, + int mb, + int n, + int nnzb, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrsm2Info_t info, + const cuDoubleComplex* B, + int ldb, + cuDoubleComplex* X, + int ldx, + cusparseSolvePolicy_t policy, + void* pBuffer); + +//############################################################################## +//# PRECONDITIONERS +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_numericBoost(cusparseHandle_t handle, + 
csrilu02Info_t info, + int enable_boost, + double* tol, + float* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + double* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + cuComplex* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_numericBoost(cusparseHandle_t handle, + csrilu02Info_t info, + int enable_boost, + double* tol, + cuDoubleComplex* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrilu02_zeroPivot(cusparseHandle_t handle, + csrilu02Info_t info, + int* position); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t 
policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrilu02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + float* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + double* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + cuComplex* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_numericBoost(cusparseHandle_t handle, + bsrilu02Info_t info, + int enable_boost, + double* tol, + cuDoubleComplex* boost_val); + +cusparseStatus_t CUSPARSEAPI +cusparseXbsrilu02_zeroPivot(cusparseHandle_t handle, + bsrilu02Info_t info, + int* position); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* 
bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsrilu02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int 
nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsrilu02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsrilu02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsric02_zeroPivot(cusparseHandle_t handle, + csric02Info_t info, + int* position); + +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_bufferSizeExt(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedVal, + const int* csrSortedRowPtr, + const int* csrSortedColInd, + csric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseScsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + 
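+/*
+ * [Editorial note - illustrative sketch only, not part of the original NVIDIA header.]
+ * The csric02 (level-0 incomplete Cholesky) routines declared around this point
+ * are typically chained as: create the opaque info object, query the workspace
+ * size, run the analysis phase, then the numeric factorization, checking for a
+ * zero pivot after the analysis and factorization steps. A hedged outline of the
+ * single-precision path is sketched below; `handle`, `descrA`, `m`, `nnz` and the
+ * device-resident CSR arrays `d_val`, `d_rowPtr`, `d_colInd` are assumed to have
+ * been set up by the caller, and error handling is omitted.
+ *
+ *   csric02Info_t info;
+ *   int bufSize, pivot;
+ *   void* buffer;
+ *   cusparseCreateCsric02Info(&info);
+ *   cusparseScsric02_bufferSize(handle, m, nnz, descrA,
+ *                               d_val, d_rowPtr, d_colInd, info, &bufSize);
+ *   cudaMalloc(&buffer, bufSize);
+ *   cusparseScsric02_analysis(handle, m, nnz, descrA, d_val, d_rowPtr, d_colInd,
+ *                             info, CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer);
+ *   if (cusparseXcsric02_zeroPivot(handle, info, &pivot) ==
+ *       CUSPARSE_STATUS_ZERO_PIVOT) {
+ *       // structural zero at A(pivot, pivot); the factorization cannot proceed
+ *   }
+ *   cusparseScsric02(handle, m, nnz, descrA, d_val, d_rowPtr, d_colInd,
+ *                    info, CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer);
+ */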
+cusparseStatus_t CUSPARSEAPI +cusparseDcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrSortedValA_valM, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXbsric02_zeroPivot(cusparseHandle_t handle, + bsric02Info_t info, + int* position); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + int* pBufferSizeInBytes); + +cusparseStatus_t 
CUSPARSEAPI +cusparseSbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockSize, + bsric02Info_t info, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02_analysis(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pInputBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuComplex* bsrSortedVal, + const 
int* bsrSortedRowPtr, + const int* + bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsric02(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nnzb, + const cusparseMatDescr_t descrA, + cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int blockDim, + bsric02Info_t info, + cusparseSolvePolicy_t policy, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + const float* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + const double* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + float* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + double* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + const float* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const double* du, + const double* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_nopivot_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* B, + int ldb, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const float* dl, + const float* d, + const float* du, + float* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const double* dl, + const double* d, + const 
double* du, + double* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2_nopivot(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* B, + int ldb, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const float* dl, + const float* d, + const float* du, + const float* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const double* dl, + const double* d, + const double* du, + const double* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2StridedBatch_bufferSizeExt(cusparseHandle_t handle, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* x, + int batchCount, + int batchStride, + size_t* bufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const float* dl, + const float* d, + const float* du, + float* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const double* dl, + const double* d, + const double* du, + double* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + cuComplex* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsv2StridedBatch(cusparseHandle_t handle, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + cuDoubleComplex* x, + int batchCount, + int batchStride, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const float* dl, + const float* d, + const float* du, + const float* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const double* dl, + const double* d, + const double* du, + const double* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t 
CUSPARSEAPI +cusparseSgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + float* dl, + float* d, + float* du, + float* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + double* dl, + double* d, + double* du, + double* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuComplex* dl, + cuComplex* d, + cuComplex* du, + cuComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgtsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuDoubleComplex* dl, + cuDoubleComplex* d, + cuDoubleComplex* du, + cuDoubleComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const float* ds, + const float* dl, + const float* d, + const float* du, + const float* dw, + const float* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const double* ds, + const double* dl, + const double* d, + const double* du, + const double* dw, + const double* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuComplex* ds, + const cuComplex* dl, + const cuComplex* d, + const cuComplex* du, + const cuComplex* dw, + const cuComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgpsvInterleavedBatch_bufferSizeExt(cusparseHandle_t handle, + int algo, + int m, + const cuDoubleComplex* ds, + const cuDoubleComplex* dl, + const cuDoubleComplex* d, + const cuDoubleComplex* du, + const cuDoubleComplex* dw, + const cuDoubleComplex* x, + int batchCount, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + float* ds, + float* dl, + float* d, + float* du, + float* dw, + float* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + double* ds, + double* dl, + double* d, + double* du, + double* dw, + double* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuComplex* ds, + cuComplex* dl, + cuComplex* d, + cuComplex* du, + cuComplex* dw, + cuComplex* x, + int batchCount, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgpsvInterleavedBatch(cusparseHandle_t handle, + int algo, + int m, + cuDoubleComplex* ds, + cuDoubleComplex* dl, + cuDoubleComplex* d, + cuDoubleComplex* du, + cuDoubleComplex* dw, + cuDoubleComplex* x, + int batchCount, + void* pBuffer); + +//############################################################################## +//# EXTRA ROUTINES +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseScsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const 
float* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const double* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const cuComplex* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrgeam2_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuDoubleComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuDoubleComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + const cuDoubleComplex* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrgeam2Nnz(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + int nnzA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrB, + int nnzB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* workspace); + +cusparseStatus_t CUSPARSEAPI +cusparseScsrgeam2(cusparseHandle_t handle, + int m, + int n, + const float* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const float* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const double* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const double* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* 
csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const cuComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrgeam2(cusparseHandle_t handle, + int m, + int n, + const cuDoubleComplex* alpha, + const cusparseMatDescr_t descrA, + int nnzA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cuDoubleComplex* beta, + const cusparseMatDescr_t descrB, + int nnzB, + const cuDoubleComplex* csrSortedValB, + const int* csrSortedRowPtrB, + const int* csrSortedColIndB, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +//############################################################################## +//# SPARSE MATRIX REORDERING +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseScsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsrcolor(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* fractionToColor, + int* ncolors, + int* coloring, + int* reordering, + const cusparseColorInfo_t info); + +//############################################################################## +//# SPARSE FORMAT CONVERSION +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseCnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const 
cusparseMatDescr_t descrA, + const cuComplex* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseZnnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* A, + int lda, + int* nnzPerRowCol, + int* nnzTotalDevHostPtr); + +//############################################################################## +//# SPARSE FORMAT CONVERSION +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseSnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + float tol); + +cusparseStatus_t CUSPARSEAPI +cusparseDnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + double tol); + +cusparseStatus_t CUSPARSEAPI +cusparseCnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + cuComplex tol); + +cusparseStatus_t CUSPARSEAPI +cusparseZnnz_compress(cusparseHandle_t handle, + int m, + const cusparseMatDescr_t descr, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + int* nnzPerRow, + int* nnzC, + cuDoubleComplex tol); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + float* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + float tol); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + double* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + double tol); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + cuComplex* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + cuComplex tol); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2csr_compress(cusparseHandle_t handle, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedColIndA, + const int* csrSortedRowPtrA, + int nnzA, + const int* nnzPerRow, + cuDoubleComplex* csrSortedValC, + int* csrSortedColIndC, + int* csrSortedRowPtrC, + cuDoubleComplex tol); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoo2csr(cusparseHandle_t handle, + const int* cooRowInd, + int nnz, + int m, + int* csrSortedRowPtr, + cusparseIndexBase_t idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2coo(cusparseHandle_t handle, + const int* csrSortedRowPtr, + int nnz, + int m, + int* cooRowInd, + cusparseIndexBase_t idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2bsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const 
cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int* nnzTotalDevHostPtr); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2bsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseSbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseDbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseCbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseZbsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int blockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* 
bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc_bufferSize(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc_bufferSizeExt(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const float* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + float* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const double* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + double* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + cuComplex* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsc(cusparseHandle_t handle, + int mb, + int nb, + int nnzb, + const cuDoubleComplex* bsrSortedVal, + const int* bsrSortedRowPtr, + const int* bsrSortedColInd, + int rowBlockDim, + int colBlockDim, + cuDoubleComplex* bscVal, + int* bscRowInd, + int* bscColPtr, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const int* bsrSortedRowPtrA, + const int* 
bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + cuComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2csr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDim, + int colBlockDim, + const cusparseMatDescr_t descrC, + cuDoubleComplex* csrSortedValC, + int* csrSortedRowPtrC, + int* csrSortedColIndC); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int 
n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + int rowBlockDim, + int colBlockDim, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsr2gebsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int rowBlockDim, + int colBlockDim, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int m, + int n, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDim, + int colBlockDim, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const 
int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr_bufferSize(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + int* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr_bufferSizeExt(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + int rowBlockDimC, + int colBlockDimC, + size_t* pBufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseXgebsr2gebsrNnz(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + int* bsrSortedRowPtrC, + int rowBlockDimC, + int colBlockDimC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const float* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + float* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const double* 
bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + double* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + cuComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZgebsr2gebsr(cusparseHandle_t handle, + cusparseDirection_t dirA, + int mb, + int nb, + int nnzb, + const cusparseMatDescr_t descrA, + const cuDoubleComplex* bsrSortedValA, + const int* bsrSortedRowPtrA, + const int* bsrSortedColIndA, + int rowBlockDimA, + int colBlockDimA, + const cusparseMatDescr_t descrC, + cuDoubleComplex* bsrSortedValC, + int* bsrSortedRowPtrC, + int* bsrSortedColIndC, + int rowBlockDimC, + int colBlockDimC, + void* pBuffer); + +//############################################################################## +//# SPARSE MATRIX SORTING +//############################################################################## + +cusparseStatus_t CUSPARSEAPI +cusparseCreateIdentityPermutation(cusparseHandle_t handle, + int n, + int* p); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* cooRowsA, + const int* cooColsA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosortByRow(cusparseHandle_t handle, + int m, + int n, + int nnz, + int* cooRowsA, + int* cooColsA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcoosortByColumn(cusparseHandle_t handle, + int m, + int n, + int nnz, + int* cooRowsA, + int* cooColsA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrsort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* csrRowPtrA, + const int* csrColIndA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcsrsort(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const int* csrRowPtrA, + int* csrColIndA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseXcscsort_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + const int* cscColPtrA, + const int* cscRowIndA, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseXcscsort(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + const int* cscColPtrA, + int* cscRowIndA, + int* P, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + cuComplex* csrVal, + const int* csrRowPtr, + 
int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsru2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnz, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseScsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsru2csr(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseScsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + float* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + double* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseZcsr2csru(cusparseHandle_t handle, + int m, + int n, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex* csrVal, + const int* csrRowPtr, + int* csrColInd, + csru2csrInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* 
nnzTotalDevHostPtr, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrNnz(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + const __half* threshold, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + const float* threshold, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csr(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + const double* threshold, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csr_bufferSizeExt(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* 
csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI + cusparseDpruneCsr2csrNnz(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csr(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const __half* threshold, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csr(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const float* threshold, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csr(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + const double* threshold, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + 
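/*
 * Usage sketch (editor's addition, not part of the vendored NVIDIA header):
 * the prune routines declared above are intended to be called in three
 * phases -- query the workspace size, count the surviving non-zeros, then
 * write the pruned CSR matrix.  The snippet below illustrates that flow for
 * the single-precision threshold variant only; the names d_A, d_csrRowPtrC,
 * d_csrValC, d_csrColIndC and prune_dense_to_csr are illustrative
 * placeholders, d_A is assumed to be a device-resident column-major dense
 * matrix, and all error checking is omitted for brevity.
 */
#include <cuda_runtime.h>
#include <cusparse.h>

static void prune_dense_to_csr(const float* d_A, int m, int n, int lda,
                               float threshold)
{
    cusparseHandle_t   handle;
    cusparseMatDescr_t descrC;
    cusparseCreate(&handle);
    cusparseCreateMatDescr(&descrC);

    int* d_csrRowPtrC;
    cudaMalloc((void**)&d_csrRowPtrC, (size_t)(m + 1) * sizeof(int));

    /* Phase 1: query the temporary workspace size (value/column arrays may
     * still be NULL at this point, as in NVIDIA's documented prune sample). */
    size_t bufferSize = 0;
    cusparseSpruneDense2csr_bufferSizeExt(handle, m, n, d_A, lda, &threshold,
                                          descrC, NULL, d_csrRowPtrC, NULL,
                                          &bufferSize);
    void* d_buffer;
    cudaMalloc(&d_buffer, bufferSize);

    /* Phase 2: count the non-zeros that survive the threshold and build the
     * row-pointer array (nnzC is returned on the host in the default
     * pointer mode). */
    int nnzC = 0;
    cusparseSpruneDense2csrNnz(handle, m, n, d_A, lda, &threshold, descrC,
                               d_csrRowPtrC, &nnzC, d_buffer);

    /* Phase 3: allocate the value/column arrays and write the pruned CSR. */
    float* d_csrValC;
    int*   d_csrColIndC;
    cudaMalloc((void**)&d_csrValC, (size_t)nnzC * sizeof(float));
    cudaMalloc((void**)&d_csrColIndC, (size_t)nnzC * sizeof(int));
    cusparseSpruneDense2csr(handle, m, n, d_A, lda, &threshold, descrC,
                            d_csrValC, d_csrRowPtrC, d_csrColIndC, d_buffer);

    /* ... use the pruned CSR matrix here ... */

    cudaFree(d_buffer);
    cudaFree(d_csrValC);
    cudaFree(d_csrColIndC);
    cudaFree(d_csrRowPtrC);
    cusparseDestroyMatDescr(descrC);
    cusparseDestroy(handle);
}
/*
 * The *ByPercentage variants that follow use the same three-phase pattern,
 * but take a percentage in [0, 100] plus a pruneInfo_t object instead of an
 * absolute threshold.
 */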
+cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + int* csrRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const __half* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const float* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneDense2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + const double* A, + int lda, + float percentage, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const __half* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const float* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrByPercentage_bufferSizeExt( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + const double* csrSortedValC, + const int* csrSortedRowPtrC, + const int* csrSortedColIndC, + pruneInfo_t info, + size_t* pBufferSizeInBytes); + +#if defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* 
nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrNnzByPercentage( + cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + int* csrSortedRowPtrC, + int* nnzTotalDevHostPtr, + pruneInfo_t info, + void* pBuffer); + +#if defined(__cplusplus) +cusparseStatus_t CUSPARSEAPI +cusparseHpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const __half* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, /* between 0 to 100 */ + const cusparseMatDescr_t descrC, + __half* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +#endif // defined(__cplusplus) + +cusparseStatus_t CUSPARSEAPI +cusparseSpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const float* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + float* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDpruneCsr2csrByPercentage(cusparseHandle_t handle, + int m, + int n, + int nnzA, + const cusparseMatDescr_t descrA, + const double* csrSortedValA, + const int* csrSortedRowPtrA, + const int* csrSortedColIndA, + float percentage, + const cusparseMatDescr_t descrC, + double* csrSortedValC, + const int* csrSortedRowPtrC, + int* csrSortedColIndC, + pruneInfo_t info, + void* pBuffer); + +//############################################################################## +//# CSR2CSC +//############################################################################## + +typedef enum { + CUSPARSE_CSR2CSC_ALG_DEFAULT = 1, + CUSPARSE_CSR2CSC_ALG1 = 1 +} cusparseCsr2CscAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseCsr2cscEx2(cusparseHandle_t handle, + int m, + int n, + int nnz, + const void* csrVal, + const int* csrRowPtr, + const int* csrColInd, + void* cscVal, + int* cscColPtr, + int* cscRowInd, + cudaDataType valType, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + cusparseCsr2CscAlg_t alg, + void* buffer); + +cusparseStatus_t CUSPARSEAPI +cusparseCsr2cscEx2_bufferSize(cusparseHandle_t handle, + int m, + int n, + int nnz, + const void* csrVal, + const int* csrRowPtr, + const int* csrColInd, + void* cscVal, + int* cscColPtr, + int* cscRowInd, + cudaDataType valType, + cusparseAction_t copyValues, + cusparseIndexBase_t idxBase, + cusparseCsr2CscAlg_t alg, + size_t* bufferSize); + +// ############################################################################# +// # GENERIC APIs - Enumerators and Opaque Data Structures +// ############################################################################# + +typedef enum { + CUSPARSE_FORMAT_CSR = 1, ///< Compressed Sparse Row (CSR) + CUSPARSE_FORMAT_CSC = 2, ///< Compressed Sparse Column (CSC) + CUSPARSE_FORMAT_COO = 3, ///< Coordinate (COO) - Structure of Arrays + CUSPARSE_FORMAT_BLOCKED_ELL = 5, ///< Blocked ELL + CUSPARSE_FORMAT_BSR = 6, ///< Blocked Compressed Sparse Row (BSR) + CUSPARSE_FORMAT_SLICED_ELLPACK = 7 ///< Sliced ELL +} cusparseFormat_t; + +typedef enum { + CUSPARSE_ORDER_COL = 1, ///< Column-Major Order - Matrix memory 
layout + CUSPARSE_ORDER_ROW = 2 ///< Row-Major Order - Matrix memory layout +} cusparseOrder_t; + +typedef enum { + CUSPARSE_INDEX_16U = 1, ///< 16-bit unsigned integer for matrix/vector + ///< indices + CUSPARSE_INDEX_32I = 2, ///< 32-bit signed integer for matrix/vector indices + CUSPARSE_INDEX_64I = 3 ///< 64-bit signed integer for matrix/vector indices +} cusparseIndexType_t; + +//------------------------------------------------------------------------------ + +struct cusparseSpVecDescr; +struct cusparseDnVecDescr; +struct cusparseSpMatDescr; +struct cusparseDnMatDescr; + +typedef struct cusparseSpVecDescr* cusparseSpVecDescr_t; +typedef struct cusparseDnVecDescr* cusparseDnVecDescr_t; +typedef struct cusparseSpMatDescr* cusparseSpMatDescr_t; +typedef struct cusparseDnMatDescr* cusparseDnMatDescr_t; + +typedef struct cusparseSpVecDescr const* cusparseConstSpVecDescr_t; +typedef struct cusparseDnVecDescr const* cusparseConstDnVecDescr_t; +typedef struct cusparseSpMatDescr const* cusparseConstSpMatDescr_t; +typedef struct cusparseDnMatDescr const* cusparseConstDnMatDescr_t; + +// ############################################################################# +// # SPARSE VECTOR DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateSpVec(cusparseSpVecDescr_t* spVecDescr, + int64_t size, + int64_t nnz, + void* indices, + void* values, + cusparseIndexType_t idxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstSpVec(cusparseConstSpVecDescr_t* spVecDescr, + int64_t size, + int64_t nnz, + const void* indices, + const void* values, + cusparseIndexType_t idxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroySpVec(cusparseConstSpVecDescr_t spVecDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGet(cusparseSpVecDescr_t spVecDescr, + int64_t* size, + int64_t* nnz, + void** indices, + void** values, + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpVecGet(cusparseConstSpVecDescr_t spVecDescr, + int64_t* size, + int64_t* nnz, + const void** indices, + const void** values, + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGetIndexBase(cusparseConstSpVecDescr_t spVecDescr, + cusparseIndexBase_t* idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecGetValues(cusparseSpVecDescr_t spVecDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpVecGetValues(cusparseConstSpVecDescr_t spVecDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVecSetValues(cusparseSpVecDescr_t spVecDescr, + void* values); + +// ############################################################################# +// # DENSE VECTOR DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateDnVec(cusparseDnVecDescr_t* dnVecDescr, + int64_t size, + void* values, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstDnVec(cusparseConstDnVecDescr_t* dnVecDescr, + int64_t size, + const void* values, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyDnVec(cusparseConstDnVecDescr_t dnVecDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecGet(cusparseDnVecDescr_t dnVecDescr, + int64_t* 
size, + void** values, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnVecGet(cusparseConstDnVecDescr_t dnVecDescr, + int64_t* size, + const void** values, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecGetValues(cusparseDnVecDescr_t dnVecDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnVecGetValues(cusparseConstDnVecDescr_t dnVecDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnVecSetValues(cusparseDnVecDescr_t dnVecDescr, + void* values); + +// ############################################################################# +// # SPARSE MATRIX DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseDestroySpMat(cusparseConstSpMatDescr_t spMatDescr); + + cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetFormat(cusparseConstSpMatDescr_t spMatDescr, + cusparseFormat_t* format); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetIndexBase(cusparseConstSpMatDescr_t spMatDescr, + cusparseIndexBase_t* idxBase); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetValues(cusparseSpMatDescr_t spMatDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstSpMatGetValues(cusparseConstSpMatDescr_t spMatDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatSetValues(cusparseSpMatDescr_t spMatDescr, + void* values); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetSize(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetStridedBatch(cusparseConstSpMatDescr_t spMatDescr, + int* batchCount); + +cusparseStatus_t CUSPARSEAPI +cusparseCooSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t batchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t offsetsBatchStride, + int64_t columnsValuesBatchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseBsrSetStridedBatch(cusparseSpMatDescr_t spMatDescr, + int batchCount, + int64_t offsetsBatchStride, + int64_t columnsValuesBatchStride, + int64_t ValuesBatchStride); + +typedef enum { + CUSPARSE_SPMAT_FILL_MODE, + CUSPARSE_SPMAT_DIAG_TYPE +} cusparseSpMatAttribute_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatGetAttribute(cusparseConstSpMatDescr_t spMatDescr, + cusparseSpMatAttribute_t attribute, + void* data, + size_t dataSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMatSetAttribute(cusparseSpMatDescr_t spMatDescr, + cusparseSpMatAttribute_t attribute, + void* data, + size_t dataSize); + +//------------------------------------------------------------------------------ +// ### CSR ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsr(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + void* csrRowOffsets, + void* csrColInd, + void* csrValues, + cusparseIndexType_t csrRowOffsetsType, + cusparseIndexType_t csrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCsr(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* csrRowOffsets, + const void* csrColInd, + const void* csrValues, + cusparseIndexType_t csrRowOffsetsType, + cusparseIndexType_t csrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCsc(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + 
int64_t nnz, + void* cscColOffsets, + void* cscRowInd, + void* cscValues, + cusparseIndexType_t cscColOffsetsType, + cusparseIndexType_t cscRowIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCsc(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* cscColOffsets, + const void* cscRowInd, + const void* cscValues, + cusparseIndexType_t cscColOffsetsType, + cusparseIndexType_t cscRowIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + void** csrRowOffsets, + void** csrColInd, + void** csrValues, + cusparseIndexType_t* csrRowOffsetsType, + cusparseIndexType_t* csrColIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstCsrGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** csrRowOffsets, + const void** csrColInd, + const void** csrValues, + cusparseIndexType_t* csrRowOffsetsType, + cusparseIndexType_t* csrColIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCscGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + void** cscColOffsets, + void** cscRowInd, + void** cscValues, + cusparseIndexType_t* cscColOffsetsType, + cusparseIndexType_t* cscRowIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstCscGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** cscColOffsets, + const void** cscRowInd, + const void** cscValues, + cusparseIndexType_t* cscColOffsetsType, + cusparseIndexType_t* cscRowIndType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCsrSetPointers(cusparseSpMatDescr_t spMatDescr, + void* csrRowOffsets, + void* csrColInd, + void* csrValues); + +cusparseStatus_t CUSPARSEAPI +cusparseCscSetPointers(cusparseSpMatDescr_t spMatDescr, + void* cscColOffsets, + void* cscRowInd, + void* cscValues); + +//------------------------------------------------------------------------------ +// ### BSR ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBsr(cusparseSpMatDescr_t* spMatDescr, + int64_t brows, + int64_t bcols, + int64_t bnnz, + int64_t rowBlockDim, + int64_t colBlockDim, + void* bsrRowOffsets, + void* bsrColInd, + void* bsrValues, + cusparseIndexType_t bsrRowOffsetsType, + cusparseIndexType_t bsrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstBsr(cusparseConstSpMatDescr_t* spMatDescr, + int64_t brows, + int64_t bcols, + int64_t bnnz, + int64_t rowBlockDim, + int64_t colBlockDim, + const void* bsrRowOffsets, + const void* bsrColInd, + const void* bsrValues, + cusparseIndexType_t bsrRowOffsetsType, + cusparseIndexType_t bsrColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType, + cusparseOrder_t order); + +//------------------------------------------------------------------------------ +// ### COO ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateCoo(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + void* cooRowInd, + void* cooColInd, + void* cooValues, + cusparseIndexType_t cooIdxType, + cusparseIndexBase_t idxBase, + 
cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstCoo(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + const void* cooRowInd, + const void* cooColInd, + const void* cooValues, + cusparseIndexType_t cooIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCooGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + void** cooRowInd, // COO row indices + void** cooColInd, // COO column indices + void** cooValues, // COO values + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstCooGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* nnz, + const void** cooRowInd, // COO row indices + const void** cooColInd, // COO column indices + const void** cooValues, // COO values + cusparseIndexType_t* idxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCooSetPointers(cusparseSpMatDescr_t spMatDescr, + void* cooRows, + void* cooColumns, + void* cooValues); + +//------------------------------------------------------------------------------ +// ### BLOCKED ELL ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateBlockedEll(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t ellBlockSize, + int64_t ellCols, + void* ellColInd, + void* ellValue, + cusparseIndexType_t ellIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstBlockedEll(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t ellBlockSize, + int64_t ellCols, + const void* ellColInd, + const void* ellValue, + cusparseIndexType_t ellIdxType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseBlockedEllGet(cusparseSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ellBlockSize, + int64_t* ellCols, + void** ellColInd, + void** ellValue, + cusparseIndexType_t* ellIdxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseConstBlockedEllGet(cusparseConstSpMatDescr_t spMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ellBlockSize, + int64_t* ellCols, + const void** ellColInd, + const void** ellValue, + cusparseIndexType_t* ellIdxType, + cusparseIndexBase_t* idxBase, + cudaDataType* valueType); + +//------------------------------------------------------------------------------ +// ### Sliced ELLPACK ### + +cusparseStatus_t CUSPARSEAPI +cusparseCreateSlicedEll(cusparseSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + int64_t sellValuesSize, + int64_t sliceSize, + void* sellSliceOffsets, + void* sellColInd, + void* sellValues, + cusparseIndexType_t sellSliceOffsetsType, + cusparseIndexType_t sellColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstSlicedEll(cusparseConstSpMatDescr_t* spMatDescr, + int64_t rows, + int64_t cols, + int64_t nnz, + int64_t sellValuesSize, + int64_t sliceSize, + const void* sellSliceOffsets, + const void* sellColInd, + const void* sellValues, + cusparseIndexType_t sellSliceOffsetsType, + cusparseIndexType_t sellColIndType, + cusparseIndexBase_t idxBase, + cudaDataType valueType); + +// ############################################################################# +// # DENSE MATRIX 
DESCRIPTOR +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseCreateDnMat(cusparseDnMatDescr_t* dnMatDescr, + int64_t rows, + int64_t cols, + int64_t ld, + void* values, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseCreateConstDnMat(cusparseConstDnMatDescr_t* dnMatDescr, + int64_t rows, + int64_t cols, + int64_t ld, + const void* values, + cudaDataType valueType, + cusparseOrder_t order); + +cusparseStatus_t CUSPARSEAPI +cusparseDestroyDnMat(cusparseConstDnMatDescr_t dnMatDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGet(cusparseDnMatDescr_t dnMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ld, + void** values, + cudaDataType* type, + cusparseOrder_t* order); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnMatGet(cusparseConstDnMatDescr_t dnMatDescr, + int64_t* rows, + int64_t* cols, + int64_t* ld, + const void** values, + cudaDataType* type, + cusparseOrder_t* order); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGetValues(cusparseDnMatDescr_t dnMatDescr, + void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseConstDnMatGetValues(cusparseConstDnMatDescr_t dnMatDescr, + const void** values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatSetValues(cusparseDnMatDescr_t dnMatDescr, + void* values); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatSetStridedBatch(cusparseDnMatDescr_t dnMatDescr, + int batchCount, + int64_t batchStride); + +cusparseStatus_t CUSPARSEAPI +cusparseDnMatGetStridedBatch(cusparseConstDnMatDescr_t dnMatDescr, + int* batchCount, + int64_t* batchStride); + +// ############################################################################# +// # VECTOR-VECTOR OPERATIONS +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseAxpby(cusparseHandle_t handle, + const void* alpha, + cusparseConstSpVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY); + +cusparseStatus_t CUSPARSEAPI +cusparseGather(cusparseHandle_t handle, + cusparseConstDnVecDescr_t vecY, + cusparseSpVecDescr_t vecX); + +cusparseStatus_t CUSPARSEAPI +cusparseScatter(cusparseHandle_t handle, + cusparseConstSpVecDescr_t vecX, + cusparseDnVecDescr_t vecY); + +cusparseStatus_t CUSPARSEAPI +cusparseRot(cusparseHandle_t handle, + const void* c_coeff, + const void* s_coeff, + cusparseSpVecDescr_t vecX, + cusparseDnVecDescr_t vecY); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opX, + cusparseConstSpVecDescr_t vecX, + cusparseConstDnVecDescr_t vecY, + const void* result, + cudaDataType computeType, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpVV(cusparseHandle_t handle, + cusparseOperation_t opX, + cusparseConstSpVecDescr_t vecX, + cusparseConstDnVecDescr_t vecY, + void* result, + cudaDataType computeType, + void* externalBuffer); + +// ############################################################################# +// # SPARSE TO DENSE +// ############################################################################# + +typedef enum { + CUSPARSE_SPARSETODENSE_ALG_DEFAULT = 0 +} cusparseSparseToDenseAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSparseToDense_bufferSize(cusparseHandle_t handle, + cusparseConstSpMatDescr_t matA, + cusparseDnMatDescr_t matB, + cusparseSparseToDenseAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSparseToDense(cusparseHandle_t handle, + cusparseConstSpMatDescr_t matA, + 
cusparseDnMatDescr_t matB, + cusparseSparseToDenseAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # DENSE TO SPARSE +// ############################################################################# + +typedef enum { + CUSPARSE_DENSETOSPARSE_ALG_DEFAULT = 0 +} cusparseDenseToSparseAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_bufferSize(cusparseHandle_t handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_analysis(cusparseHandle_t handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseDenseToSparse_convert(cusparseHandle_t handle, + cusparseConstDnMatDescr_t matA, + cusparseSpMatDescr_t matB, + cusparseDenseToSparseAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # SPARSE MATRIX-VECTOR MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SPMV_ALG_DEFAULT = 0, + CUSPARSE_SPMV_CSR_ALG1 = 2, + CUSPARSE_SPMV_CSR_ALG2 = 3, + CUSPARSE_SPMV_COO_ALG1 = 1, + CUSPARSE_SPMV_COO_ALG2 = 4, + CUSPARSE_SPMV_SELL_ALG1 = 5 +} cusparseSpMVAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMV(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpMVAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + const void* beta, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpMVAlg_t alg, + size_t* bufferSize); + +// ############################################################################# +// # SPARSE TRIANGULAR VECTOR SOLVE +// ############################################################################# + +typedef enum { + CUSPARSE_SPSV_ALG_DEFAULT = 0, +} cusparseSpSVAlg_t; + +typedef enum { + CUSPARSE_SPSV_UPDATE_GENERAL = 0, + CUSPARSE_SPSV_UPDATE_DIAGONAL = 1 +} cusparseSpSVUpdate_t; + +struct cusparseSpSVDescr; +typedef struct cusparseSpSVDescr* cusparseSpSVDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_createDescr(cusparseSpSVDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_destroyDescr(cusparseSpSVDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_analysis(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_solve(cusparseHandle_t handle, + cusparseOperation_t opA, + const void* alpha, + cusparseConstSpMatDescr_t matA, + 
cusparseConstDnVecDescr_t vecX, + cusparseDnVecDescr_t vecY, + cudaDataType computeType, + cusparseSpSVAlg_t alg, + cusparseSpSVDescr_t spsvDescr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSV_updateMatrix(cusparseHandle_t handle, + cusparseSpSVDescr_t spsvDescr, + void* newValues, + cusparseSpSVUpdate_t updatePart); + + + +// ############################################################################# +// # SPARSE TRIANGULAR MATRIX SOLVE +// ############################################################################# + +typedef enum { + CUSPARSE_SPSM_ALG_DEFAULT = 0, +} cusparseSpSMAlg_t; + +struct cusparseSpSMDescr; +typedef struct cusparseSpSMDescr* cusparseSpSMDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_createDescr(cusparseSpSMDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_destroyDescr(cusparseSpSMDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_analysis(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpSM_solve(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpSMAlg_t alg, + cusparseSpSMDescr_t spsmDescr); + +// ############################################################################# +// # SPARSE MATRIX-MATRIX MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SPMM_ALG_DEFAULT = 0, + CUSPARSE_SPMM_COO_ALG1 = 1, + CUSPARSE_SPMM_COO_ALG2 = 2, + CUSPARSE_SPMM_COO_ALG3 = 3, + CUSPARSE_SPMM_COO_ALG4 = 5, + CUSPARSE_SPMM_CSR_ALG1 = 4, + CUSPARSE_SPMM_CSR_ALG2 = 6, + CUSPARSE_SPMM_CSR_ALG3 = 12, + CUSPARSE_SPMM_BLOCKED_ELL_ALG1 = 13 +} cusparseSpMMAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM_preprocess(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMM(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMAlg_t alg, + void* externalBuffer); + +// 
############################################################################# +// # SPARSE MATRIX - SPARSE MATRIX MULTIPLICATION (SpGEMM) +// ############################################################################# + +typedef enum { + CUSPARSE_SPGEMM_DEFAULT = 0, + CUSPARSE_SPGEMM_CSR_ALG_DETERMINITIC = 1, + CUSPARSE_SPGEMM_CSR_ALG_NONDETERMINITIC = 2, + CUSPARSE_SPGEMM_ALG1 = 3, + CUSPARSE_SPGEMM_ALG2 = 4, + CUSPARSE_SPGEMM_ALG3 = 5 +} cusparseSpGEMMAlg_t; + +struct cusparseSpGEMMDescr; +typedef struct cusparseSpGEMMDescr* cusparseSpGEMMDescr_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_createDescr(cusparseSpGEMMDescr_t* descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_destroyDescr(cusparseSpGEMMDescr_t descr); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_workEstimation(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize1, + void* externalBuffer1); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_getNumProducts(cusparseSpGEMMDescr_t spgemmDescr, + int64_t* num_prods); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_estimateMemory(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + float chunk_fraction, + size_t* bufferSize3, + void* externalBuffer3, + size_t* bufferSize2); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_compute(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize2, + void* externalBuffer2); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMM_copy(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr); + +// ############################################################################# +// # SPARSE MATRIX - SPARSE MATRIX MULTIPLICATION (SpGEMM) STRUCTURE REUSE +// ############################################################################# + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_workEstimation(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize1, + void* externalBuffer1); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_nnz(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize2, + void* externalBuffer2, + size_t* bufferSize3, + void* externalBuffer3, + size_t* bufferSize4, + void* 
externalBuffer4); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_copy(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + cusparseSpMatDescr_t matC, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr, + size_t* bufferSize5, + void* externalBuffer5); + +cusparseStatus_t CUSPARSEAPI +cusparseSpGEMMreuse_compute(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstSpMatDescr_t matA, + cusparseConstSpMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSpGEMMAlg_t alg, + cusparseSpGEMMDescr_t spgemmDescr); + +// ############################################################################# +// # SAMPLED DENSE-DENSE MATRIX MULTIPLICATION +// ############################################################################# + +typedef enum { + CUSPARSE_SDDMM_ALG_DEFAULT = 0 +} cusparseSDDMMAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM_bufferSize(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + size_t* bufferSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM_preprocess(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSDDMM(cusparseHandle_t handle, + cusparseOperation_t opA, + cusparseOperation_t opB, + const void* alpha, + cusparseConstDnMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + const void* beta, + cusparseSpMatDescr_t matC, + cudaDataType computeType, + cusparseSDDMMAlg_t alg, + void* externalBuffer); + +// ############################################################################# +// # GENERIC APIs WITH CUSTOM OPERATORS (PREVIEW) +// ############################################################################# + +struct cusparseSpMMOpPlan; +typedef struct cusparseSpMMOpPlan* cusparseSpMMOpPlan_t; + +typedef enum { + CUSPARSE_SPMM_OP_ALG_DEFAULT +} cusparseSpMMOpAlg_t; + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp_createPlan(cusparseHandle_t handle, + cusparseSpMMOpPlan_t* plan, + cusparseOperation_t opA, + cusparseOperation_t opB, + cusparseConstSpMatDescr_t matA, + cusparseConstDnMatDescr_t matB, + cusparseDnMatDescr_t matC, + cudaDataType computeType, + cusparseSpMMOpAlg_t alg, + const void* addOperationNvvmBuffer, + size_t addOperationBufferSize, + const void* mulOperationNvvmBuffer, + size_t mulOperationBufferSize, + const void* epilogueNvvmBuffer, + size_t epilogueBufferSize, + size_t* SpMMWorkspaceSize); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp(cusparseSpMMOpPlan_t plan, + void* externalBuffer); + +cusparseStatus_t CUSPARSEAPI +cusparseSpMMOp_destroyPlan(cusparseSpMMOpPlan_t plan); + +//------------------------------------------------------------------------------ + +#if defined(__cplusplus) +} // extern "C" +#endif // defined(__cplusplus) + +#undef CUSPARSE_DEPRECATED + +#endif // !defined(CUSPARSE_H_) diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse_v2.h 
b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse_v2.h new file mode 100644 index 0000000000000000000000000000000000000000..f889e1f569d46d1116fe6e302429b3855de43c21 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/include/cusparse_v2.h @@ -0,0 +1,54 @@ +/* + * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. + * + * NOTICE TO LICENSEE: + * + * This source code and/or documentation ("Licensed Deliverables") are + * subject to NVIDIA intellectual property rights under U.S. and + * international Copyright laws. + * + * These Licensed Deliverables contained herein is PROPRIETARY and + * CONFIDENTIAL to NVIDIA and is being provided under the terms and + * conditions of a form of NVIDIA software license agreement by and + * between NVIDIA and Licensee ("License Agreement") or electronically + * accepted by Licensee. Notwithstanding any terms or conditions to + * the contrary in the License Agreement, reproduction or disclosure + * of the Licensed Deliverables to any third party without the express + * written consent of NVIDIA is prohibited. + * + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE + * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS + * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. + * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED + * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, + * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. + * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE + * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY + * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY + * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, + * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THESE LICENSED DELIVERABLES. + * + * U.S. Government End Users. These Licensed Deliverables are a + * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT + * 1995), consisting of "commercial computer software" and "commercial + * computer software documentation" as such terms are used in 48 + * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government + * only as a commercial end item. Consistent with 48 C.F.R.12.212 and + * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all + * U.S. Government End Users acquire the Licensed Deliverables with + * only those rights set forth herein. + * + * Any use of the Licensed Deliverables in individual and commercial + * software must include, in the user documentation and internal + * comments to the code, the above Disclaimer and U.S. Government End + * Users Notice. 
+ */ +#if !defined(CUSPARSE_V2_H_) +#define CUSPARSE_V2_H_ + +#include "cusparse.h" + +#endif diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/__init__.py b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f888ad6b7d2c84fbc24b6206f35c74c558f609ab Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so b/llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so new file mode 100644 index 0000000000000000000000000000000000000000..febf7d70cc32960a699501352364ea21c8269d37 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09f0031d06ac6f2a67010411a6507849eaa826754ca553a95459ac18a4775e5f +size 34990041
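The generic cuSPARSE entry points declared in cusparse.h above follow a two-phase pattern: a *_bufferSize call reports the workspace the operation needs, the caller allocates it, and the operation proper is then invoked with that buffer. The sketch below is a minimal, hypothetical SpMV example (y = alpha * A * x + beta * y, A in CSR form) written against the declarations above; it additionally assumes cusparseCreateCsr and cusparseCreateDnVec, which are declared earlier in the same header, and omits error checking for brevity.

#include <cuda_runtime.h>
#include <cusparse.h>

/* Hypothetical sketch: y = alpha * A * x + beta * y with A in CSR form.
 * All array arguments are device pointers; status checks are omitted. */
void spmv_csr_f32(cusparseHandle_t handle,
                  int64_t rows, int64_t cols, int64_t nnz,
                  int* dCsrRowOffsets, int* dCsrColInd, float* dCsrValues,
                  float* dX, float* dY)
{
    float alpha = 1.0f, beta = 0.0f;
    cusparseSpMatDescr_t matA;
    cusparseDnVecDescr_t vecX, vecY;

    /* cusparseCreateCsr / cusparseCreateDnVec are declared earlier in cusparse.h. */
    cusparseCreateCsr(&matA, rows, cols, nnz,
                      dCsrRowOffsets, dCsrColInd, dCsrValues,
                      CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                      CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
    cusparseCreateDnVec(&vecX, cols, dX, CUDA_R_32F);
    cusparseCreateDnVec(&vecY, rows, dY, CUDA_R_32F);

    /* Phase 1: query the workspace size and allocate it. */
    size_t bufferSize = 0;
    void*  dBuffer    = NULL;
    cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, matA, vecX, &beta, vecY,
                            CUDA_R_32F, CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize);
    cudaMalloc(&dBuffer, bufferSize);

    /* Phase 2: run the multiplication with the allocated workspace. */
    cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                 &alpha, matA, vecX, &beta, vecY,
                 CUDA_R_32F, CUSPARSE_SPMV_ALG_DEFAULT, dBuffer);

    cudaFree(dBuffer);
    cusparseDestroySpMat(matA);
    cusparseDestroyDnVec(vecX);
    cusparseDestroyDnVec(vecY);
}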
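The dense-matrix descriptor functions (cusparseCreateDnMat and friends) wrap a column- or row-major buffer so it can participate in SpMM, SpSM, and SDDMM. A second hedged sketch, following the same bufferSize-then-execute pattern for C = alpha * A * B + beta * C with a CSR A and column-major dense B and C; cusparseCreateCsr again comes from earlier in the header and error handling is omitted.

/* Hypothetical sketch: C = alpha * A * B + beta * C; A is m x k CSR, B is k x n and
 * C is m x n, both dense column-major with leading dimensions ldb / ldc.
 * Assumes <cusparse.h> and <cuda_runtime.h> are included. */
void spmm_csr_f32(cusparseHandle_t handle,
                  int64_t m, int64_t k, int64_t n, int64_t nnz,
                  int* dCsrRowOffsets, int* dCsrColInd, float* dCsrValues,
                  float* dB, int64_t ldb, float* dC, int64_t ldc)
{
    float alpha = 1.0f, beta = 0.0f;
    cusparseSpMatDescr_t matA;
    cusparseDnMatDescr_t matB, matC;

    cusparseCreateCsr(&matA, m, k, nnz, dCsrRowOffsets, dCsrColInd, dCsrValues,
                      CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
                      CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
    cusparseCreateDnMat(&matB, k, n, ldb, dB, CUDA_R_32F, CUSPARSE_ORDER_COL);
    cusparseCreateDnMat(&matC, m, n, ldc, dC, CUDA_R_32F, CUSPARSE_ORDER_COL);

    /* Query the workspace size, allocate, then execute. */
    size_t bufferSize = 0;
    void*  dBuffer    = NULL;
    cusparseSpMM_bufferSize(handle,
                            CUSPARSE_OPERATION_NON_TRANSPOSE,
                            CUSPARSE_OPERATION_NON_TRANSPOSE,
                            &alpha, matA, matB, &beta, matC,
                            CUDA_R_32F, CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize);
    cudaMalloc(&dBuffer, bufferSize);

    cusparseSpMM(handle,
                 CUSPARSE_OPERATION_NON_TRANSPOSE,
                 CUSPARSE_OPERATION_NON_TRANSPOSE,
                 &alpha, matA, matB, &beta, matC,
                 CUDA_R_32F, CUSPARSE_SPMM_ALG_DEFAULT, dBuffer);

    cudaFree(dBuffer);
    cusparseDestroySpMat(matA);
    cusparseDestroyDnMat(matB);
    cusparseDestroyDnMat(matC);
}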
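The SpGEMM entry points are staged differently: cusparseSpGEMM_workEstimation and cusparseSpGEMM_compute are each called twice (first with a NULL buffer to query its size, then with the allocated buffer), after which the nnz of the product is read back, the output CSR arrays are allocated and attached, and cusparseSpGEMM_copy writes the result. The outline below is a hedged sketch of that workflow under CUSPARSE_SPGEMM_DEFAULT; it assumes cusparseSpMatGetSize and cusparseCsrSetPointers from earlier in the header, takes already-created descriptors as inputs (matC created with empty column/value arrays and nnz = 0), and omits error checking.

/* Hypothetical sketch of the SpGEMM workflow: C = A * B, all CSR, float values.
 * dC_csrOffsets is a preallocated device array of rowsC + 1 ints.
 * Assumes <cusparse.h> and <cuda_runtime.h> are included. */
void spgemm_csr_f32(cusparseHandle_t handle,
                    cusparseSpMatDescr_t matA, cusparseSpMatDescr_t matB,
                    cusparseSpMatDescr_t matC, int* dC_csrOffsets)
{
    float alpha = 1.0f, beta = 0.0f;
    cusparseOperation_t op = CUSPARSE_OPERATION_NON_TRANSPOSE;
    cusparseSpGEMMDescr_t spgemmDescr;
    cusparseSpGEMM_createDescr(&spgemmDescr);

    /* Stage 1: estimate intermediate memory (size query, then the actual inspection). */
    size_t bufferSize1 = 0;  void* dBuffer1 = NULL;
    cusparseSpGEMM_workEstimation(handle, op, op, &alpha, matA, matB, &beta, matC,
                                  CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, spgemmDescr,
                                  &bufferSize1, NULL);
    cudaMalloc(&dBuffer1, bufferSize1);
    cusparseSpGEMM_workEstimation(handle, op, op, &alpha, matA, matB, &beta, matC,
                                  CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, spgemmDescr,
                                  &bufferSize1, dBuffer1);

    /* Stage 2: compute the structure and values of A*B into the descriptor's storage. */
    size_t bufferSize2 = 0;  void* dBuffer2 = NULL;
    cusparseSpGEMM_compute(handle, op, op, &alpha, matA, matB, &beta, matC,
                           CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, spgemmDescr,
                           &bufferSize2, NULL);
    cudaMalloc(&dBuffer2, bufferSize2);
    cusparseSpGEMM_compute(handle, op, op, &alpha, matA, matB, &beta, matC,
                           CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, spgemmDescr,
                           &bufferSize2, dBuffer2);

    /* Stage 3: read back nnz(C), allocate C's arrays, attach them, and copy the result. */
    int64_t rowsC, colsC, nnzC;
    cusparseSpMatGetSize(matC, &rowsC, &colsC, &nnzC);
    int*   dC_columns = NULL;
    float* dC_values  = NULL;
    cudaMalloc((void**)&dC_columns, nnzC * sizeof(int));
    cudaMalloc((void**)&dC_values,  nnzC * sizeof(float));
    cusparseCsrSetPointers(matC, dC_csrOffsets, dC_columns, dC_values);
    cusparseSpGEMM_copy(handle, op, op, &alpha, matA, matB, &beta, matC,
                        CUDA_R_32F, CUSPARSE_SPGEMM_DEFAULT, spgemmDescr);

    cudaFree(dBuffer1);
    cudaFree(dBuffer2);
    cusparseSpGEMM_destroyDescr(spgemmDescr);
}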