#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import pycutlass
from pycutlass import *
from pycutlass.test import *
from time import sleep
from bfloat16 import bfloat16
import subprocess
from typeguard import typechecked
import re
def getTensorRef(tensor, tensor_layout, conv_kind, problem_size, operand):
ptr = tensor.__array_interface__['data'][0]
if operand == "a":
tensor_coord = cutlass.conv.implicit_gemm_tensor_a_extent(conv_kind, problem_size)
elif operand == "b":
tensor_coord = cutlass.conv.implicit_gemm_tensor_b_extent(conv_kind, problem_size)
elif operand in ["c", "d"]:
tensor_coord = cutlass.conv.implicit_gemm_tensor_c_extent(conv_kind, problem_size)
else:
raise ValueError("unknown operand: " + operand)
layout = tensor_layout.packed(tensor_coord)
if tensor.dtype == np.float64:
return cutlass.TensorRefF64NHWC(ptr, layout)
elif tensor.dtype == np.float32:
return cutlass.TensorRefF32NHWC(ptr, layout)
elif tensor.dtype == np.float16:
return cutlass.TensorRefF16NHWC(ptr, layout)
elif tensor.dtype == bfloat16:
return cutlass.TensorRefBF16NHWC(ptr, layout)
elif tensor.dtype == np.int32:
return cutlass.TensorRefS32NHWC(ptr, layout)
elif tensor.dtype == np.int8:
if tensor_layout == cutlass.TensorNC32HW32:
return cutlass.TensorRefS8NC32HW32(ptr, layout)
elif tensor_layout == cutlass.TensorC32RSK32:
return cutlass.TensorRefS8C32RSK32(ptr, layout)
else:
return cutlass.TensorRefS8NHWC(ptr, layout)
else:
raise ValueError("unsupported data type")
def getTensorView(tensor, tensor_layout, conv_kind, problem_size, operand):
tensor_ref = getTensorRef(tensor, tensor_layout, conv_kind, problem_size, operand)
if operand == "a":
tensor_coord = cutlass.conv.implicit_gemm_tensor_a_extent(conv_kind, problem_size)
elif operand == "b":
tensor_coord = cutlass.conv.implicit_gemm_tensor_b_extent(conv_kind, problem_size)
elif operand in ["c", "d"]:
tensor_coord = cutlass.conv.implicit_gemm_tensor_c_extent(conv_kind, problem_size)
else:
raise ValueError("unknown operand: " + operand)
if tensor.dtype == np.float64:
return cutlass.TensorViewF64NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.float32:
return cutlass.TensorViewF32NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.float16:
return cutlass.TensorViewF16NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == bfloat16:
return cutlass.TensorViewBF16NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.int32:
return cutlass.TensorViewS32NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.int8:
if tensor_layout == cutlass.TensorNC32HW32:
return cutlass.TensorViewS8NC32HW32(tensor_ref, tensor_coord)
elif tensor_layout == cutlass.TensorC32RSK32:
return cutlass.TensorViewS8C32RSK32(tensor_ref, tensor_coord)
else:
return cutlass.TensorViewS8NHWC(tensor_ref, tensor_coord)
else:
raise ValueError("unsupported data type")
# @typechecked
class Conv2dLauncher:
"""
Launcher that runs the operation on a given problem size
"""
def __init__(self, operation: 'Conv2dOperation', seed: int=2080, interleaved=False,
verification=True, profiling=False, warmup_iterations=500, iterations=500, **kwargs) -> None:
self.enable_cached_results = True
self.interleaved = interleaved
# create the reduction kernel
self.reduction_operation = ReductionOperation(
shape=cutlass.MatrixCoord(4, 32 * operation.C.alignment),
C=operation.C, element_accumulator=operation.tile_description.math_instruction.element_accumulator,
element_compute=operation.epilogue_functor.element_epilogue, epilogue_functor=operation.epilogue_functor,
count=operation.C.alignment
)
#: verify the output result
self.verification = verification
#: profile the kernel's runtime
self.profiling = profiling
self.timer = GpuTimer()
self.warmup_iterations = warmup_iterations
self.iterations = iterations
if "sleep" in kwargs.keys():
self.sleep_time = kwargs["sleep"]
else:
self.sleep_time = 0
#
# Compile the operator
#
pycutlass.compiler.add_module([operation, self.reduction_operation])
self.operation = operation
self.dtype_A = Conv2dLauncher.numpy_type(operation.A.element)
self.layout_A = operation.A.layout
self.dtype_B = Conv2dLauncher.numpy_type(operation.B.element)
self.layout_B = operation.B.layout
self.dtype_C = Conv2dLauncher.numpy_type(operation.C.element)
self.layout_C = operation.C.layout
self.dtype_D = Conv2dLauncher.numpy_type(operation.C.element)
self.layout_D = operation.C.layout
accumulator_size = DataTypeSize[operation.tile_description.math_instruction.element_accumulator]
element_size = DataTypeSize[operation.A.element]
if element_size <= 8:
self.scope = 1
elif element_size == 16:
if accumulator_size <= 16:
self.scope = 2
else:
self.scope = 4
else:
self.scope = 7
# Seed
self.seed = seed
self.conv_kind = operation.conv_kind
#
# Get the host reference function
#
self.element_compute = operation.epilogue_functor.element_epilogue
self.host_conv2d = cutlass.test.conv.host.conv2d
@staticmethod
def numpy_type(type):
if type == cutlass.float64:
return np.float64
elif type == cutlass.float32:
return np.float32
elif type == cutlass.float16:
return np.float16
elif type == cutlass.bfloat16:
return bfloat16
elif type == cutlass.int32:
return np.int32
elif type == cutlass.int8:
return np.int8
else:
raise ValueError("unsupported type: %s" % ShortDataTypeNames[type])
def print_problem_size(self, p, split_k_mode=1):
print("nhwc_%dx%dx%dx%d_krsc_%dx%dx%dx%d_padding_%dx%d_stride_%dx%d_dilation_%dx%d_splitkslices_%d_splitkmode_%d"
% (p.N, p.H, p.W, p.C, p.K, p.R, p.S, p.C, p.pad_h,
p.pad_w, p.stride_h, p.stride_w, p.dilation_h, p.dilation_w, p.split_k_slices, split_k_mode))
def uniform_init(self, size, dtype):
if dtype in [np.float32, np.float16, bfloat16, np.float64]:
return np.ceil(
np.random.uniform(
low=-self.scope - 0.5, high=self.scope - 0.5,
size=size).astype(dtype)
)
else:
return np.random.uniform(
low=-self.scope - 1, high=self.scope + 1,
size=size).astype(dtype)
def eq_gemm_size(self, problem_size):
n = problem_size.N
p = problem_size.P
q = problem_size.Q
k = problem_size.K
r = problem_size.R
s = problem_size.S
c = problem_size.C
h = problem_size.H
w = problem_size.W
if self.conv_kind == cutlass.conv.Operator.fprop:
return cutlass.gemm.GemmCoord(n * p * q, k, r * s * c)
elif self.conv_kind == cutlass.conv.Operator.dgrad:
return cutlass.gemm.GemmCoord(n * h * w, c, k * r * s)
else:
return cutlass.gemm.GemmCoord(k, r * s * c, n * p * q)
def bytes(self, problem_size, alpha, beta):
mnk = self.eq_gemm_size(problem_size)
bytes_ = \
(DataTypeSize[self.operation.A.element] * mnk.m() // 8) * mnk.k() + \
(DataTypeSize[self.operation.B.element] * mnk.n() // 8) * mnk.k() + \
(DataTypeSize[self.operation.C.element] * mnk.m() // 8) * mnk.n()
if beta != 0:
bytes_ += (DataTypeSize[self.operation.C.element] * mnk.m() // 8) * mnk.n()
return bytes_
def flops(self, problem_size):
mnk = self.eq_gemm_size(problem_size)
flops_mainloop_ = mnk.m() * mnk.n() * mnk.k() * 2
flops_epilogue_ = mnk.m() * mnk.n() * 2
# Adjust mainloop flop for dgrad stride
if self.conv_kind == cutlass.conv.Operator.dgrad:
flops_mainloop_ = flops_mainloop_ // (problem_size.stride_h * problem_size.stride_w)
flops_total_ = flops_mainloop_ + flops_epilogue_
return flops_total_
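# Worked example of the equivalent-GEMM arithmetic above (fprop): with
# N=1, P=Q=56, K=64, R=S=3, C=64 the implicit GEMM extent is
#   (M, N, K) = (N*P*Q, K, R*S*C) = (3136, 64, 576),
# so the mainloop contributes 2*M*N*K flops and the epilogue a further 2*M*N, while
# bytes() charges reads of A and B, a write of D, and an extra read of C when beta != 0.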
def host_reference(self, problem_size, tensor_A, tensor_B, tensor_C, alpha, beta):
if self.element_compute == cutlass.float16:
alpha = cutlass.float16(alpha)
beta = cutlass.float16(beta)
elif self.element_compute == cutlass.int32:
alpha = int(alpha)
beta = int(beta)
else:
alpha = alpha
beta = beta
# if cached result is loaded
cached_result_loaded = False
if self.enable_cached_results:
# get problem key
cached_test_key = cutlass.test.conv.host.CreateCachedConv2dTestKey(
self.conv_kind, problem_size, alpha, beta,
getTensorView(tensor_A, self.layout_A, self.conv_kind, problem_size, "a"),
getTensorView(tensor_B, self.layout_B, self.conv_kind, problem_size, "b"),
getTensorView(tensor_C, self.layout_C, self.conv_kind, problem_size, "c"),
)
cached_test_result = cutlass.test.conv.host.CachedTestResult()
conv2d_result_cache_name = "cached_results_SM%d_%d.txt" % (self.operation.arch, self.seed)
cached_results = cutlass.test.conv.host.CachedTestResultListing(conv2d_result_cache_name)
cached = cached_results.find(cached_test_key)
cached_result_loaded = cached[0]
if cached_result_loaded:
cached_test_result = cached[1]
if not cached_result_loaded:
# compute the conv2d on host
tensor_D_ref = np.ones_like(tensor_C)
tensor_ref_A = getTensorRef(tensor_A, self.layout_A, self.conv_kind, problem_size, "a")
tensor_ref_B = getTensorRef(tensor_B, self.layout_B, self.conv_kind, problem_size, "b")
tensor_ref_C = getTensorRef(tensor_C, self.layout_C, self.conv_kind, problem_size, "c")
tensor_ref_D_ref = getTensorRef(tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d")
self.host_conv2d(
self.conv_kind, problem_size,
tensor_ref_A, tensor_ref_B, tensor_ref_C, tensor_ref_D_ref,
alpha, beta
)
tensor_view_D_ref = getTensorView(tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d")
if self.enable_cached_results:
cached_test_result.D = cutlass.test.conv.host.TensorHash(tensor_view_D_ref)
cached_results = cutlass.test.conv.host.CachedTestResultListing(conv2d_result_cache_name)
cached_results.append(cached_test_key, cached_test_result)
cached_results.write(conv2d_result_cache_name)
else:
return tensor_D_ref
return cached_test_result.D
def equal(self, tensor_D, tensor_D_ref, problem_size):
if self.enable_cached_results:
tensor_view_D = getTensorView(tensor_D, self.layout_D, self.conv_kind, problem_size, "d")
tensor_D_hash = cutlass.test.conv.host.TensorHash(tensor_view_D)
return tensor_D_hash == tensor_D_ref
else:
tensor_view_D = getTensorView(tensor_D, self.layout_D, self.conv_kind, problem_size, "d")
tensor_view_D_ref = getTensorView(tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d")
return cutlass.test.conv.host.equals(tensor_view_D, tensor_view_D_ref)
def run_cutlass_profiler(self, problem_size, split_k_mode=cutlass.conv.SplitKMode.Serial, alpha=1.0, beta=0.0):
if split_k_mode == cutlass.conv.SplitKMode.Serial:
split_k_mode_ = "serial"
else:
split_k_mode_ = "parallel"
cutlass_path = os.getenv('CUTLASS_PATH')
assert cutlass_path is not None, "Environment variable 'CUTLASS_PATH' is not defined."
values = {
"profiler_path": cutlass_path + "/build/tools/profiler/cutlass_profiler",
"kernel_name": self.operation.procedural_name(),
"verification_providers": "device",
"provider": "cutlass",
'n': str(problem_size.N),
'h': str(problem_size.H),
'w': str(problem_size.W),
'c': str(problem_size.C),
'k': str(problem_size.K),
'r': str(problem_size.R),
's': str(problem_size.S),
'p': str(problem_size.P),
'q': str(problem_size.Q),
'pad_h': str(problem_size.pad_h),
'pad_w': str(problem_size.pad_w),
'stride_h': str(problem_size.stride_h),
'stride_w': str(problem_size.stride_w),
'dilation_h': str(problem_size.dilation_h),
'dilation_w': str(problem_size.dilation_w),
'split_k_slices': str(problem_size.split_k_slices),
'split_k_mode': split_k_mode_,
'alpha': str(alpha),
'beta': str(beta),
'warmup': str(self.warmup_iterations),
'profile': str(self.iterations)
}
cmd_template = \
"${profiler_path} --kernels=${kernel_name} --verification-providers=${verification_providers}" \
" --providers=${provider} --n=${n} --h=${h} --w=${w} --c=${c} --k=${k} --r=${r} --s=${s} --p=${p}" \
" --q=${q} --pad_h=${pad_h} --pad_w=${pad_w} --stride_h={stride_h} --stride_w=${stride_w}" \
" --dilation_h=${dilation_h} --dilation_w=${dilation_w} --warmup-iterations=${warmup} --profiling-iterations=${profile}" \
" --split_k_slices=${split_k_slices} --alpha=${alpha} --beta=${beta} --split_k_mode=${split_k_mode}"
cmd = SubstituteTemplate(cmd_template, values)
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group('runtime'))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group('bytes'))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group('flops'))
# check if the problem size matches
assert bytes == self.bytes(problem_size, alpha, beta)
assert flops == self.flops(problem_size)
return runtime
def run(self, problem_size, split_k_mode=cutlass.conv.SplitKMode.Serial,
alpha=1.0, beta=0.0):
assert get_allocated_size() == 0, "%d bytes of pool memory were not released in the previous run" % get_allocated_size()
#
# Initialize input and output tensors
#
tensor_A_size = cutlass.conv.implicit_gemm_tensor_a_size(self.conv_kind, problem_size)
tensor_B_size = cutlass.conv.implicit_gemm_tensor_b_size(self.conv_kind, problem_size)
tensor_C_size = cutlass.conv.implicit_gemm_tensor_c_size(self.conv_kind, problem_size)
np.random.seed(self.seed)
tensor_A = self.uniform_init(size=(tensor_A_size,), dtype=self.dtype_A)
tensor_B = self.uniform_init(size=(tensor_B_size,), dtype=self.dtype_B)
tensor_C = self.uniform_init(size=(tensor_C_size,), dtype=self.dtype_C)
tensor_D = np.zeros(shape=(tensor_C_size,), dtype=self.dtype_D)
#
# Launch kernel
#
arguments = Conv2dArguments(
operation=self.operation, problem_size=problem_size, A=tensor_A,
B=tensor_B, C=tensor_C, D=tensor_D,
output_op = self.operation.epilogue_type(alpha, beta),
split_k_slices=problem_size.split_k_slices,
split_k_mode=split_k_mode
)
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
implicit_gemm_size = cutlass.conv.implicit_gemm_problem_size(self.operation.conv_kind, arguments.problem_size)
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()], partitions=problem_size.split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op = self.reduction_operation.epilogue_type(alpha, beta)
)
self.operation.run(arguments)
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
passed = True
if self.verification:
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
reduction_arguments.sync()
else:
arguments.sync()
tensor_D_ref = self.host_reference(problem_size, tensor_A, tensor_B, tensor_C, alpha, beta)
passed = self.equal(tensor_D, tensor_D_ref, problem_size)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size, split_k_mode)
if self.profiling:
sleep(self.sleep_time)
for _ in range(self.warmup_iterations):
self.operation.run(arguments)
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
self.timer.start()
for _ in range(self.iterations):
self.operation.run(arguments)
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
# free memory
del arguments
if split_k_mode == cutlass.conv.SplitKMode.Parallel:
del reduction_arguments
assert get_allocated_size() == 0, "%d bytes of pool memory were not released after the current run" % get_allocated_size()
if self.profiling:
return runtime
return passed
############################################################################################################
# TestAllConv: runs the cutlass::conv::device::ImplicitGemmConvolution operator and compares the result
# against the host reference. It sweeps the default conv problem sizes from
# test::conv::device::TestbedConv2dProblemSizes. Additionally, each conv2d test can provide its own
# problem sizes (conv_test_sizes) and a blacklist of sizes (conv_blacklist_sizes).
############################################################################################################
def test_all_conv2d(operation: Conv2dOperation, conv_test_sizes = [], interleaved=False):
passed = True
#
# Testbed object
#
testbed = Conv2dLauncher(operation, interleaved=interleaved)
#
# Get conv problem sizes to run conv operator
#
conv_problems = cutlass.test.conv.TestbedConv2dProblemSizes(64)
# List of conv2d problem sizes already tested, used to avoid duplicate runs
conv_tested_sizes = []
# Flatten the 2D problem_vectors into a 1D list of problem sizes
problem_sizes = conv_problems.conv2d_default_sizes
problem_sizes = [conv_problem for conv_problem in problem_sizes] + conv_test_sizes
# Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slices=1, alpha=1.0, beta=0.0)
for conv_problem in problem_sizes:
if conv_problem in conv_tested_sizes:
continue
# skip channel dimension % 32 != 0 for interleaved case
if interleaved:
if conv_problem.K % 32 != 0 or conv_problem.C % 32 != 0:
continue
#
# Procedurally disable certain cases
#
# CUTLASS DGRAD's *unity* stride specialization only supports stride {1, 1}
if operation.conv_kind == cutlass.conv.Operator.dgrad and operation.stride_support == StrideSupport.Unity:
if not ((conv_problem.stride_h == 1) and (conv_problem.stride_w == 1)):
continue
if not interleaved:
# Fixed channels algorithm requires channel count to match access size
if operation.iterator_algorithm == cutlass.conv.IteratorAlgorithm.fixed_channels:
if conv_problem.C != operation.A.alignment:
continue
# Few channels algorithm requires the channel count to be divisible by the access size
if operation.iterator_algorithm == cutlass.conv.IteratorAlgorithm.few_channels:
if conv_problem.C % operation.A.alignment:
continue
# CUTLASS DGRAD's *strided* stride specialization supports all stride {stride_h, stride_w}
# Although strided dgrad works for all stride combinations, we are only going
# to run strided dgrad for non-unity strides
if operation.conv_kind == cutlass.conv.Operator.dgrad and operation.stride_support == StrideSupport.Strided:
if (conv_problem.stride_h == 1) and (conv_problem.stride_w == 1):
continue
#
# Test
#
# push back tested problem size to avoid re-running duplicates
conv_tested_sizes.append(conv_problem)
passed = testbed.run(conv_problem)
if not passed:
return False
if interleaved:
return True
#
# filter the cases for split K
#
# Small-channels convolution can't run here.
if operation.iterator_algorithm in [cutlass.conv.IteratorAlgorithm.fixed_channels, cutlass.conv.IteratorAlgorithm.few_channels]:
return True
# CUTLASS DGRAD's *strided* specialization does not support split-k mode
if operation.conv_kind == cutlass.conv.Operator.dgrad and operation.stride_support == StrideSupport.Strided:
conv_problem = cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 56, 56, 8),
cutlass.Tensor4DCoord(8, 1, 1, 8),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
)
passed = testbed.run(conv_problem)
return passed
# Sweep split-k-slices using serial and parallel reduction with non-unity alpha and non-zero beta for
# a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
# which are absolutely necessary to catch functional bugs. The code below does provide the option to
# sweep alpha and beta for local testing, but only runs one value for each.
conv2d_split_k_test_size = cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 17, 11, 288),
cutlass.Tensor4DCoord(160, 3, 3, 288),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
)
split_k_modes = [cutlass.conv.SplitKMode.Parallel, cutlass.conv.SplitKMode.Serial]
split_k_slices = [1, 2, 3, 4, 201]
problem_alpha = [2.0,]
problem_beta = [2.0,]
for split_k_mode in split_k_modes:
for split_k_slice in split_k_slices:
for alpha in problem_alpha:
for beta in problem_beta:
passed = testbed.run(conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
alpha, beta)
return passed
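# Usage sketch (hypothetical; `operation` is a Conv2dOperation constructed and compiled
# elsewhere, and a CUDA device is required):
#
#   launcher = Conv2dLauncher(operation, verification=True, profiling=False)
#   problem = cutlass.conv.Conv2dProblemSize(...)   # filled in by the caller
#   assert launcher.run(problem, cutlass.conv.SplitKMode.Serial, alpha=1.0, beta=0.0)
#
# or sweep the default problem sizes with test_all_conv2d(operation).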
from pycutlass.test.profiler import *
from pycutlass.test.conv2d_testbed import *
from pycutlass.test.gemm_testbed import *
from pycutlass.test.gemm_grouped_testbed import *
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass
from pycutlass import library, SubstituteTemplate
class Layout:
"""
Utility class to map transpose and non-transpose terminology to row- and column-major terminology
"""
T = cutlass.RowMajor
N = cutlass.ColumnMajor
class LayoutCombination:
"""
Utility class defining all combinations of row- and column-major layouts for the A, B, and C operands of a GEMM
"""
NNN = (Layout.N, Layout.N, Layout.N)
NNT = (Layout.N, Layout.N, Layout.T)
NTN = (Layout.N, Layout.T, Layout.N)
NTT = (Layout.N, Layout.T, Layout.T)
TNN = (Layout.T, Layout.N, Layout.N)
TNT = (Layout.T, Layout.N, Layout.T)
TTN = (Layout.T, Layout.T, Layout.N)
TTT = (Layout.T, Layout.T, Layout.T)
def get_name(layouts, alignments, element_output,
element_accumulator, element_epilogue, cluster_shape,
threadblock_shape, stages, element_a, element_b, arch, opclass, suffix=""):
"""
Generates a procedural name for a test case.
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param element_a: data type of operand A
:param element_b: data type of operand B
:param arch: compute capability of kernel being generated
:type arch: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpClass
:param suffix: additional string to add to the suffix of the name
:type suffix: str
:return: str
"""
name_format = 'test_SM${arch}_Device_Gemm_${eA}${lA}_${eB}${lB}_${eC}${lC}_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${cM}x${cN}x${cK}_${stages}_align${aA}-${aB}-${aC}${suffix}'
return SubstituteTemplate(name_format,
{
'arch': str(arch),
'eA': library.DataTypeNames[element_a],
'eB': library.DataTypeNames[element_b],
'eC': library.DataTypeNames[element_output],
'lA': library.ShortLayoutTypeNames[layouts[0]],
'lB': library.ShortLayoutTypeNames[layouts[1]],
'lC': library.ShortLayoutTypeNames[layouts[2]],
'opclass': library.OpcodeClassNames[opclass],
'acc': library.DataTypeNames[element_accumulator],
'cM': str(cluster_shape[0]),
'cN': str(cluster_shape[1]),
'cK': str(cluster_shape[2]),
'tbM': str(threadblock_shape[0]),
'tbN': str(threadblock_shape[1]),
'tbK': str(threadblock_shape[2]),
'stages': str(stages) if stages is not None else 'auto',
'aA' : str(alignments[0]),
'aB' : str(alignments[1]),
'aC' : str(alignments[2]),
'suffix': '' if suffix is None else suffix
}
)
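# Example (illustrative; assumes the Tensor Core opcode class is exposed as cutlass.OpClass.TensorOp):
#
#   name = get_name(layouts=LayoutCombination.TNT, alignments=[8, 8, 8],
#                   element_output=cutlass.float16, element_accumulator=cutlass.float32,
#                   element_epilogue=cutlass.float32, cluster_shape=[1, 1, 1],
#                   threadblock_shape=[128, 128, 32], stages=3,
#                   element_a=cutlass.float16, element_b=cutlass.float16,
#                   arch=80, opclass=cutlass.OpClass.TensorOp)
#
# yields a name of the form
#   test_SM80_Device_Gemm_f16t_f16n_f16t_tensorop_f32_128x128x32_1x1x1_3_align8-8-8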
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from time import sleep
import pycutlass
from pycutlass import *
import pycutlass.utils.datatypes as datatypes
import cutlass
from cuda import cudart
from cuda import cuda
from bfloat16 import bfloat16
from .profiler import GpuTimer
import subprocess
def transpose(layout):
if layout == cutlass.RowMajor:
return cutlass.ColumnMajor
elif layout == cutlass.ColumnMajor:
return cutlass.RowMajor
elif layout == cutlass.ColumnMajorInterleaved32:
return cutlass.RowMajorInterleaved32
elif layout == cutlass.RowMajorInterleaved32:
return cutlass.ColumnMajorInterleaved32
else:
raise ValueError("unsupported layout for transpose: %s" % str(layout))
def getTensorRef(tensor: np.ndarray, problem_size: cutlass.gemm.GemmCoord, operand: str, layout: cutlass.layout, batch_offset: int = 0):
ptr = tensor.__array_interface__['data'][0]
if operand == "a":
tensor_coord = problem_size.mk()
batch_stride = problem_size.m() * problem_size.k()
elif operand == "b":
tensor_coord = problem_size.kn()
batch_stride = problem_size.k() * problem_size.n()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
batch_stride = problem_size.m() * problem_size.n()
else:
raise ValueError("Unknown operand: " + operand)
elt_size = DataTypeSizeBytes[datatypes.to_cutlass(tensor.dtype)]
ptr += batch_offset * batch_stride * elt_size
if layout == cutlass.RowMajor:
layout = cutlass.RowMajor.packed(tensor_coord)
layout_tag = "RowMajor"
elif layout == cutlass.ColumnMajor:
layout = cutlass.ColumnMajor.packed(tensor_coord)
layout_tag = "ColumnMajor"
elif layout == cutlass.ColumnMajorInterleaved32:
layout = cutlass.ColumnMajorInterleaved32.packed(tensor_coord)
layout_tag = "ColumnMajorInterleaved32"
elif layout == cutlass.RowMajorInterleaved32:
layout = cutlass.RowMajorInterleaved32.packed(tensor_coord)
layout_tag = "RowMajorInterleaved32"
else:
raise ValueError("unsupported layout")
if tensor.dtype == np.float32:
ref_name = "TensorRefF32" + layout_tag
elif tensor.dtype == np.float64:
ref_name = "TensorRefF64" + layout_tag
elif tensor.dtype == np.float16:
ref_name = "TensorRefF16" + layout_tag
elif tensor.dtype == bfloat16:
ref_name = "TensorRefBF16" + layout_tag
elif tensor.dtype == np.int8:
ref_name = "TensorRefS8" + layout_tag
elif tensor.dtype == np.int32:
ref_name = "TensorRefS32" + layout_tag
else:
raise ValueError("unsupported datatype %s" %
ShortDataTypeNames[tensor.dtype])
return getattr(cutlass, ref_name)(ptr, layout)
def getTensorView(tensor: np.ndarray, problem_size: cutlass.gemm.GemmCoord, operand: str, layout: cutlass.layout, batch_offset: int = 0):
tensor_ref = getTensorRef(tensor, problem_size, operand, layout, batch_offset)
if operand == "a":
tensor_coord = problem_size.mk()
elif operand == "b":
tensor_coord = problem_size.kn()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
else:
raise ValueError("Unknown operand: " + operand)
if layout == cutlass.RowMajor:
layout_tag = "RowMajor"
elif layout == cutlass.ColumnMajor:
layout_tag = "ColumnMajor"
elif layout == cutlass.ColumnMajorInterleaved32:
layout_tag = "ColumnMajorInterleaved32"
elif layout == cutlass.RowMajorInterleaved32:
layout_tag = "RowMajorInterleaved32"
else:
raise ValueError("unsupported layout")
if tensor.dtype == np.float32:
ref_name = "TensorViewF32" + layout_tag
elif tensor.dtype == np.float64:
ref_name = "TensorViewF64" + layout_tag
elif tensor.dtype == np.float16:
ref_name = "TensorViewF16" + layout_tag
elif tensor.dtype == bfloat16:
ref_name = "TensorViewBF16" + layout_tag
elif tensor.dtype == np.int32:
ref_name = "TensorViewS32" + layout_tag
elif tensor.dtype == np.int8:
ref_name = "TensorViewS8" + layout_tag
else:
raise ValueError("unsupported datatype")
return getattr(cutlass, ref_name)(tensor_ref, tensor_coord)
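# Example (sketch): wrapping the i-th batch of a row-major fp16 operand A for a
# batched GEMM of extent (m, n, k) = (128, 128, 64); `i` and `batch_count` are placeholders.
#
#   problem = cutlass.gemm.GemmCoord(128, 128, 64)
#   A = np.ones((128 * 64 * batch_count,), dtype=np.float16)
#   ref_A = getTensorRef(A, problem, "a", cutlass.RowMajor, batch_offset=i)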
class GemmUniversalLauncher:
def __init__(self, operation: 'GemmOperationUniversal', seed: int = 2080, interleaved=False,
verification=True, profiling=False, warmup_iterations=500, iterations=500, **kwargs) -> None:
# create the reduction kernel
self.reduction_operation: ReductionOperation = ReductionOperation(
shape=cutlass.MatrixCoord(4, 32 * operation.C.alignment),
C=operation.C, element_accumulator=operation.tile_description.math_instruction.element_accumulator,
element_compute=operation.epilogue_functor.element_epilogue, epilogue_functor=operation.epilogue_functor,
count=operation.C.alignment
)
self.math_operation = operation.tile_description.math_instruction.math_operation
#: verify the output result
self.verification = verification
#: profile the kernel's runtime
self.profiling = profiling
self.timer = GpuTimer()
self.warmup_iterations = warmup_iterations
self.iterations = iterations
if "sleep" in kwargs.keys():
self.sleep_time = kwargs["sleep"]
else:
self.sleep_time = 0
#
# Compile the operator
#
op_list = [operation]
if operation.arch < 90:
# Split K via Python is currently only supported for pre-SM90 kernels
op_list.append(self.reduction_operation)
pycutlass.compiler.add_module(op_list)
self.operation = operation
self.dtype_A = GemmUniversalLauncher.numpy_type(operation.A.element)
self.dtype_B = GemmUniversalLauncher.numpy_type(operation.B.element)
self.dtype_C = GemmUniversalLauncher.numpy_type(operation.C.element)
self.dtype_D = GemmUniversalLauncher.numpy_type(operation.C.element)
accumulator_size = DataTypeSize[operation.tile_description.math_instruction.element_accumulator]
element_size = DataTypeSize[operation.A.element]
if element_size == 1:
self.scope_max = 1
self.scope_min = 0
elif element_size <= 8:
self.scope_max = 1
self.scope_min = -1
elif element_size == 16:
self.scope_max = 4
self.scope_min = -4
else:
self.scope_max = 8
self.scope_min = -8
#: seed
self.seed: int = seed
#: whether the layout is interleaved
self.interleaved = interleaved
#: compute type
self.compute_type = operation.epilogue_functor.element_epilogue
self.accumulator_type = operation.tile_description.math_instruction.element_accumulator
def print_problem_size(self, p, mode, batch_count):
if mode == cutlass.gemm.Mode.Gemm:
mode = "Gemm"
elif mode == cutlass.gemm.Mode.Batched:
mode = "GemmBatched"
elif mode == cutlass.gemm.Mode.GemmSplitKParallel:
mode = "GemmSplitKParallel"
problem_size = "problem: %d, %d, %d\n batch_count: %d\n mode: %s" % (
p.m(), p.n(), p.k(), batch_count, mode)
print(problem_size)
@staticmethod
def numpy_type(type):
if type == cutlass.float64:
return np.float64
elif type == cutlass.float32:
return np.float32
elif type == cutlass.float16:
return np.float16
elif type == cutlass.bfloat16:
return bfloat16
elif type == cutlass.int32:
return np.int32
elif type == cutlass.int8:
return np.int8
else:
raise ValueError("unsupported type: %s" % ShortDataTypeNames[type])
def uniform_init(self, size, dtype):
if dtype in [np.float32, np.float16, bfloat16, np.float64]:
return np.ceil(
np.random.uniform(
low=self.scope_min - 0.5, high=self.scope_max - 0.5,
size=size).astype(dtype)
)
else:
return np.random.uniform(
low=self.scope_min - 1, high=self.scope_max + 1,
size=size).astype(dtype)
def reorder_tensor_B(self, tensor_B, problem_size):
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = getTensorRef(
tensor_B, problem_size, "b", self.operation.B.layout)
reordered_tensor_ref_B = getTensorRef(
reordered_tensor_B, problem_size, "b", self.operation.B.layout)
cutlass.gemm.host.reorder_column(
tensor_ref_B, reordered_tensor_ref_B, problem_size)
return reordered_tensor_B
def host_reference(self, problem_size, batch_count, tensor_A, tensor_B, tensor_C, alpha, beta):
tensor_D_ref = np.ones_like(tensor_C)
alpha = self.numpy_type(self.compute_type)(alpha)
beta = self.numpy_type(self.compute_type)(beta)
init_acc = 0
alpha = self.compute_type(alpha).value()
beta = self.compute_type(beta).value()
init_acc = self.accumulator_type(init_acc).value()
for i in range(batch_count):
if self.operation.switched:
tensor_ref_A = getTensorRef(
tensor_A, problem_size, "a", transpose(self.operation.B.layout), batch_offset=i)
tensor_ref_B = getTensorRef(
tensor_B, problem_size, "b", transpose(self.operation.A.layout), batch_offset=i)
tensor_ref_C = getTensorRef(
tensor_C, problem_size, "c", transpose(self.operation.C.layout), batch_offset=i)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref, problem_size, "d", transpose(self.operation.C.layout), batch_offset=i)
else:
tensor_ref_A = getTensorRef(
tensor_A, problem_size, "a", self.operation.A.layout, batch_offset=i)
tensor_ref_B = getTensorRef(
tensor_B, problem_size, "b", self.operation.B.layout, batch_offset=i)
tensor_ref_C = getTensorRef(
tensor_C, problem_size, "c", self.operation.C.layout, batch_offset=i)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref, problem_size, "d", self.operation.C.layout, batch_offset=i)
if self.math_operation in [MathOperation.multiply_add_saturate]:
cutlass.test.gemm.host.gemm_saturate(
problem_size, alpha, tensor_ref_A, tensor_ref_B, beta, tensor_ref_C, tensor_ref_D_ref, init_acc)
else:
cutlass.test.gemm.host.gemm(problem_size, alpha, tensor_ref_A,
tensor_ref_B, beta, tensor_ref_C, tensor_ref_D_ref, init_acc)
return tensor_D_ref
def equal(self, tensor_D, tensor_D_ref, problem_size, batch_count):
for i in range(batch_count):
tensor_view_D = getTensorView(
tensor_D, problem_size, "d", self.operation.C.layout, batch_offset=i)
tensor_view_D_ref = getTensorView(
tensor_D_ref, problem_size, "d", self.operation.C.layout, batch_offset=i)
if not cutlass.test.gemm.host.equals(tensor_view_D, tensor_view_D_ref):
return False
return True
def bytes(self, problem_size, batch_count=1, alpha=1.0, beta=0.0):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
bytes = \
(DataTypeSize[self.operation.A.element] * m // 8) * k + \
(DataTypeSize[self.operation.B.element] * n // 8) * k + \
(DataTypeSize[self.operation.C.element] * m // 8) * n
if beta != 0:
bytes += (DataTypeSize[self.operation.C.element] * m // 8) * n
bytes *= batch_count
return bytes
def flops(self, problem_size, batch_count=1):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
flops_ = (m * n * k) * 2 * batch_count
return flops_
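# Worked example of the two methods above for an all-fp16 GEMM with
# (m, n, k) = (512, 512, 256) and a single batch:
#   flops = 2 * 512 * 512 * 256
#   bytes = 2 * (512*256) + 2 * (256*512) + 2 * (512*512)   # A read, B read, D write
# plus an extra 2 * (512*512) for the read of C when beta != 0.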
def run_cutlass_profiler(self, mode, problem_size, batch_count=1, alpha=1.0, beta=0.0):
cutlass_path = os.getenv('CUTLASS_PATH')
assert cutlass_path is not None, "Environment variable 'CUTLASS_PATH' is not defined."
values = {
"profiler_path": cutlass_path + "/build/tools/profiler/cutlass_profiler",
"kernel_name": self.operation.procedural_name(),
"verification_providers": "device",
"provider": "cutlass",
"m": str(problem_size.m()),
"n": str(problem_size.n()),
"k": str(problem_size.k()),
'split_k_slices': str(batch_count),
'alpha': str(alpha),
'beta': str(beta),
'warmup': str(self.warmup_iterations),
'profile': str(self.iterations)
}
cmd_template = \
"${profiler_path} --kernels=${kernel_name} --verification-providers=${verification_providers}" \
" --providers=${provider} --m=${m} --n=${n} --k=${k}"
cmd = SubstituteTemplate(cmd_template, values)
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group('runtime'))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group('bytes'))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group('flops'))
# check if the problem size matches
assert bytes == self.bytes(problem_size, 1, alpha, beta)
assert flops == self.flops(problem_size)
return runtime
def run(self, mode, problem_size, batch_count=1, split_k_slices=1, alpha=1.0, beta=0.0):
assert get_allocated_size() == 0, "%d bytes of pool memory were not released in the previous run" % get_allocated_size()
np.random.seed(self.seed)
# Assign an actual batch count in cases where we are not running in batched mode.
# This is to differentiate between the number of split K slices and the batch count,
# which are overloaded within the single `batch_count` variable.
true_batch_count = batch_count if mode == cutlass.gemm.Mode.Batched else 1
tensor_A = self.uniform_init(
size=(problem_size.m() * problem_size.k() * true_batch_count,), dtype=self.dtype_A)
tensor_B = self.uniform_init(
size=(problem_size.n() * problem_size.k() * true_batch_count,), dtype=self.dtype_B)
tensor_C = self.uniform_init(
size=(problem_size.m() * problem_size.n() * true_batch_count,), dtype=self.dtype_C)
tensor_D = np.zeros(
shape=(problem_size.m() * problem_size.n() * true_batch_count,), dtype=self.dtype_D)
#
# Launch kernel
#
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=mode, split_k_slices=split_k_slices, batch=batch_count
)
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
reduction_arguments = ReductionArguments(
self.reduction_operation, problem_size=[
problem_size.m(), problem_size.n()],
partitions=split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op=self.reduction_operation.epilogue_type(alpha, beta)
)
self.operation.run(arguments)
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
passed = True
if self.verification:
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
reduction_arguments.sync()
else:
arguments.sync()
tensor_D_ref = self.host_reference(
problem_size, true_batch_count, tensor_A, tensor_B, tensor_C, alpha, beta)
passed = self.equal(tensor_D, tensor_D_ref, problem_size, true_batch_count)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size, mode, batch_count)
if self.profiling:
sleep(self.sleep_time)
for _ in range(self.warmup_iterations):
self.operation.run(arguments)
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
self.timer.start()
for _ in range(self.iterations):
self.operation.run(arguments)
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
# free memory and clear buffers
del arguments
if mode == cutlass.gemm.Mode.GemmSplitKParallel:
del reduction_arguments
assert get_allocated_size() == 0, "%d bytes of pool memory were not released after the current run" % get_allocated_size()
if self.profiling:
return runtime
return passed
def test_all_gemm(operation: 'GemmOperationUniversal', testcase="universal"):
passed = True
minimum_operand_element_size = min(
DataTypeSize[operation.A.element], DataTypeSize[operation.B.element])
opcode_class = operation.tile_description.math_instruction.opcode_class
if opcode_class == cutlass.OpClass.Simt:
alignment = 1
else:
alignment = 128 // minimum_operand_element_size
# int8_t gemm alignment constraints
if opcode_class == cutlass.OpClass.Simt and operation.A.element == cutlass.int8 and operation.A.layout == cutlass.ColumnMajor:
alignment_m = 4
else:
alignment_m = alignment
if opcode_class == cutlass.OpClass.Simt and operation.B.element == cutlass.int8 and operation.A.layout == cutlass.RowMajor:
alignment_n = 4
else:
alignment_n = alignment
if opcode_class == cutlass.OpClass.Simt and operation.A.element == cutlass.int8 \
and operation.B.element == cutlass.int8 \
and (operation.A.layout == cutlass.RowMajor or operation.B.layout == cutlass.ColumnMajor):
alignment_k = 4
else:
alignment_k = alignment
threadblock_k = operation.tile_description.threadblock_shape[2]
if testcase == "interleaved":
if operation.A.layout in [cutlass.ColumnMajorInterleaved32, cutlass.RowMajorInterleaved32]:
interleavedk = 32
else:
raise ValueError("Unknown layout")
if testcase == "interleaved":
modes = [cutlass.gemm.Mode.Gemm, ]
problem_size_m = [interleavedk, 512+interleavedk]
problem_size_n = [interleavedk, 512+interleavedk]
problem_size_k = [interleavedk, threadblock_k *
operation.tile_description.stages + interleavedk]
problem_alpha = [1.0]
problem_beta = [0.0]
batch_counts = [1, ]
elif testcase == "multistage":
modes = [cutlass.gemm.Mode.Gemm, ]
problem_size_m = [16, 528]
problem_size_n = [16, 528]
problem_size_k = [threadblock_k, threadblock_k * operation.tile_description.stages +
operation.tile_description.math_instruction.instruction_shape[2]]
problem_alpha = [1.0]
problem_beta = [0.0]
batch_counts = [1, ]
else: # universal
modes = [cutlass.gemm.Mode.Gemm]
batch_counts = [1, 2, 3, 5, 7]
if operation.arch < 90:
# Split K kernels via Python are currently only supported pre-SM90
modes.append(cutlass.gemm.Mode.GemmSplitKParallel)
problem_size_m = [alignment_m, 512 - 3 * alignment_m]
problem_size_n = [alignment_n, 512 - 2 * alignment_n]
if operation.tile_description.stages is None:
stages_for_k_calc = 7
else:
stages_for_k_calc = operation.tile_description.stages
problem_size_k = [
alignment_k,
threadblock_k * stages_for_k_calc - alignment_k,
threadblock_k * stages_for_k_calc * 3 - alignment_k]
problem_alpha = [1.0]
problem_beta = [2.0]
testbed = GemmUniversalLauncher(
operation, interleaved=(testcase == "interleaved"))
for mode in modes:
for m in problem_size_m:
for n in problem_size_n:
for k in problem_size_k:
for batch_count in batch_counts:
for alpha in problem_alpha:
for beta in problem_beta:
# skip very small K problems
if testcase == "universal":
if (k // batch_count < 2 * threadblock_k):
continue
problem_size = cutlass.gemm.GemmCoord(m, n, k)
if operation.arch < 90:
split_k_slices = batch_count
else:
split_k_slices = 1
overridden_mode = mode
if mode == cutlass.gemm.Mode.Gemm and batch_count > 1:
overridden_mode = cutlass.gemm.Mode.Batched
passed = testbed.run(
overridden_mode, problem_size, batch_count, split_k_slices, alpha, beta)
err, = cudart.cudaDeviceSynchronize()
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError(
"CUDA Error %s" % str(err))
if not passed:
return False
return passed
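# Usage sketch (hypothetical; `operation` is a compiled GemmOperationUniversal and a CUDA
# device is required):
#
#   launcher = GemmUniversalLauncher(operation)
#   ok = launcher.run(cutlass.gemm.Mode.Gemm, cutlass.gemm.GemmCoord(256, 256, 128))
#
# or run the full functional sweep with test_all_gemm(operation, "universal").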
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cuda import cuda
from cuda import cudart
class GpuTimer:
def __init__(self) -> None:
self.events = [
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1]
]
def start(self, stream=cuda.CUstream(0)):
err, = cuda.cuEventRecord(self.events[0], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def stop(self, stream=cuda.CUstream(0)):
err, = cuda.cuEventRecord(self.events[1], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def stop_and_wait(self, stream=cuda.CUstream(0)):
self.stop(stream)
if stream:
err, = cuda.cuStreamSynchronize(stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
else:
err, = cudart.cudaDeviceSynchronize()
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError("CUDA Error %s" % str(err))
def duration(self, iterations=1):
err, duration = cuda.cuEventElapsedTime(self.events[0], self.events[1])
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return duration / float(iterations)
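# Usage sketch: timing work enqueued on the default stream. cuEventElapsedTime reports
# milliseconds, so duration() returns milliseconds per iteration.
#
#   timer = GpuTimer()
#   timer.start()
#   # ... launch kernels ...
#   timer.stop_and_wait()
#   ms_per_iteration = timer.duration(iterations=1)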
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for interacting with the device
"""
from cuda import cudart
def check_cuda_errors(result: list):
"""
Checks whether `result` contains a CUDA error and, if so, raises it as an exception. Otherwise,
returns the results contained in the remaining fields of `result`.
:param result: the results of the `cudart` method, consisting of an error code and any method results
:type result: list
:return: non-error-code results from the `results` parameter
"""
# `result` is of the format : (cudaError_t, result...)
err = result[0]
if err.value:
raise RuntimeError("CUDA error: {}".format(cudart.cudaGetErrorName(err)))
if len(result) == 1:
return None
elif len(result) == 2:
return result[1]
else:
return result[1:]
def device_cc(device: int = 0) -> int:
"""
Returns the compute capability of the device with ID `device`.
:param device: ID of the device to query
:type device: int
:return: compute capability of the queried device (e.g., 80 for SM80)
:rtype: int
"""
deviceProp = check_cuda_errors(cudart.cudaGetDeviceProperties(device))
major = str(deviceProp.major)
minor = str(deviceProp.minor)
return int(major + minor)
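# Usage sketch: both helpers above compose naturally with the `cudart` bindings.
#
#   cc = device_cc()                                          # e.g. 80 on an A100
#   free, total = check_cuda_errors(cudart.cudaMemGetInfo())  # multi-result case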
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import numpy as np
import cutlass
from pycutlass.library import TensorDescription
from typing import Union
from bfloat16 import bfloat16
try:
import torch
torch_available = True
except ImportError:
torch_available = False
class ReferenceModule:
def __init__(self, A: TensorDescription, B: TensorDescription, C: TensorDescription) -> None:
self.layout_A = A.layout
self.layout_B = B.layout
self.layout_C = C.layout
def run(self, A: np.ndarray, B: np.ndarray, C: np.ndarray, problem_size: cutlass.gemm.GemmCoord, alpha: float=1.0, beta: float=0.0, bias=False, batch=1):
"""
Compute the reference result on CPU
Args:
A: dense operator with shape (M, K) in row-major and (K, M) in column-major
B: dense operator with shape (K, N) in row-major and (N, K) in column-major
C: dense operator with shape (M, N) in row-major and (N, M) in column-major
"""
M, N, K = problem_size.m(), problem_size.n(), problem_size.k()
if isinstance(A, np.ndarray):
if self.layout_A == cutlass.RowMajor:
A_row = np.reshape(A, newshape=(batch, M, K))
else:
A_col = np.reshape(A, newshape=(batch, K, M))
A_row = np.transpose(A_col, axes=(0, 2, 1))
if self.layout_B == cutlass.RowMajor:
B_row = np.reshape(B, newshape=(batch, K, N))
else:
B_col = np.reshape(B, newshape=(batch, N, K))
B_row = np.transpose(B_col, axes=(0, 2, 1))
if self.layout_C == cutlass.RowMajor:
if bias:
C_row = np.reshape(C, newshape=(batch, 1, N))
else:
C_row = np.reshape(C, newshape=(batch, M, N))
else:
if bias:
C_row = np.reshape(C, newshape=(batch, M, 1))
else:
C_col = np.reshape(C, newshape=(batch, N, M))
C_row = np.transpose(C_col, axes=(0, 2, 1))
if A_row.dtype == bfloat16:
# numpy's einsum doesn't support bfloat16
out_row = np.einsum("bik,bkj->bij", A_row.astype(np.float32), B_row.astype(np.float32)) * alpha + C_row * beta
out_row = out_row.astype(C_row.dtype)
else:
out_row = np.einsum("bik,bkj->bij", A_row, B_row) * alpha + C_row * beta
if self.layout_C == cutlass.ColumnMajor:
out = np.transpose(out_row, axes=(0, 2, 1))
else:
out = out_row
return out.ravel()
elif isinstance(A, torch.Tensor):
if self.layout_A == cutlass.RowMajor:
A_row = A.view((M, K))
else:
A_col = A.view((K, M))
A_row = torch.permute(A_col, (1, 0))
if self.layout_B == cutlass.RowMajor:
B_row = B.view((K, N))
else:
B_col = B.view((N, K))
B_row = torch.permute(B_col, (1, 0))
if self.layout_C == cutlass.RowMajor:
C_row = C.view((M, N))
else:
C_col = C.view((N, M))
C_row = torch.permute(C_col, (1, 0))
out_row = torch.matmul(A_row, B_row) * alpha + C_row * beta
if self.layout_C == cutlass.ColumnMajor:
out = torch.permute(out_row, (1, 0))
else:
out = out_row
return torch.flatten(out)
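# Example (sketch, assuming TensorDescription(element, layout) from pycutlass.library):
# checking a small row-major fp32 GEMM against the numpy path above.
#
#   desc = TensorDescription(cutlass.float32, cutlass.RowMajor)
#   ref = ReferenceModule(A=desc, B=desc, C=desc)
#   problem = cutlass.gemm.GemmCoord(64, 64, 32)
#   A = np.random.rand(64 * 32).astype(np.float32)
#   B = np.random.rand(32 * 64).astype(np.float32)
#   C = np.zeros((64 * 64,), dtype=np.float32)
#   D = ref.run(A, B, C, problem, alpha=1.0, beta=0.0)   # flattened (M, N) result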
#####################################################################################################
# Conv2d
#####################################################################################################
if torch_available:
class Conv2dReferenceModule:
def __init__(self, A: TensorDescription, B: TensorDescription, C: TensorDescription, kind: cutlass.conv.Operator) -> None:
self.layout_A = A.layout
self.layout_B = B.layout
self.layout_C = C.layout
self.kind = kind
def run(self,
A: Union[np.ndarray, torch.Tensor],
B: Union[np.ndarray, torch.Tensor],
C: Union[np.ndarray, torch.Tensor], problem_size, alpha=1.0, beta=0.0, bias=False) -> np.ndarray:
"""
Compute the reference result on CPU
"""
n = problem_size.N
h = problem_size.H
w = problem_size.W
c = problem_size.C
k = problem_size.K
r = problem_size.R
s = problem_size.S
p = problem_size.P
q = problem_size.Q
stride_h = problem_size.stride_h
stride_w = problem_size.stride_w
pad_h = problem_size.pad_h
pad_w = problem_size.pad_w
dilation_h = problem_size.dilation_h
dilation_w = problem_size.dilation_w
groups = problem_size.groups
if isinstance(A, np.ndarray):
# the pytorch activation layout is NCHW
# weight layout is Cout Cin Kh Kw (also NCHW)
if self.layout_A == cutlass.TensorNHWC:
A_nhwc = np.reshape(A, newshape=(n, h, w, c))
A_torch_nhwc = torch.from_numpy(A_nhwc).to("cuda")
A_torch_nchw = torch.permute(A_torch_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass.TensorNHWC:
B_nhwc = np.reshape(B, newshape=(k, r, s, c))
B_torch_nhwc = torch.from_numpy(B_nhwc).to("cuda")
B_torch_nchw = torch.permute(B_torch_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass.TensorNHWC:
C_nhwc = np.reshape(C, newshape=(n, p, q, k))
C_torch_nhwc = torch.from_numpy(C_nhwc).to("cuda")
C_torch_nchw = torch.permute(C_torch_nhwc, (0, 3, 1, 2))
elif isinstance(A, torch.Tensor):
if self.kind == cutlass.conv.Operator.wgrad:
if self.layout_A == cutlass.TensorNHWC:
A_nhwc = A.view((n, p, q, k))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass.TensorNHWC:
B_nhwc = B.view((n, h, w, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, c))
else:
C_nhwc = C.view((k, r, s, c))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
elif self.kind == cutlass.conv.Operator.dgrad:
if self.layout_A == cutlass.TensorNHWC:
A_nhwc = A.view((n, p, q, k))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass.TensorNHWC:
B_nhwc = B.view((k, r, s, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, c))
else:
C_nhwc = C.view((n, h, w, c))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
else:
if self.layout_A == cutlass.TensorNHWC:
A_nhwc = A.view((n, h, w, c))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass.TensorNHWC:
B_nhwc = B.view((k, r, s, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, k))
else:
C_nhwc = C.view((n, p, q, k))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
if self.kind == cutlass.conv.Operator.fprop:
D_torch_nchw = alpha * torch.nn.functional.conv2d(
A_torch_nchw, B_torch_nchw, stride=(stride_h, stride_w),
padding=(pad_h, pad_w), dilation=(dilation_h, dilation_w), groups=groups) + beta * C_torch_nchw
elif self.kind == cutlass.conv.Operator.dgrad:
D_torch_nchw = alpha * torch.nn.grad.conv2d_input(
(n, c, h, w), B_torch_nchw, A_torch_nchw, padding=(pad_h, pad_w), stride=(stride_h, stride_w)
).to(torch.float32) + beta * C_torch_nchw
elif self.kind == cutlass.conv.Operator.wgrad:
D_torch_nchw = alpha * torch.nn.grad.conv2d_weight(
B_torch_nchw, (k, c, r, s), A_torch_nchw, padding=(pad_h, pad_w), stride=(stride_h, stride_w)
).to(torch.float32) + beta * C_torch_nchw
if self.layout_C == cutlass.TensorNHWC:
if isinstance(A, np.ndarray):
D_torch_out = torch.permute(D_torch_nchw, (0, 2, 3, 1)).detach().cpu().numpy()
elif isinstance(A, torch.Tensor):
D_torch_out = torch.permute(D_torch_nchw, (0, 2, 3, 1))
return D_torch_out.flatten()
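# ------------------------------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the fprop reference above is essentially
# "permute NHWC -> NCHW, run torch conv2d, permute back". The helper below sketches that pattern
# for a single activation/filter pair; argument names and shapes are illustrative only.
def _example_fprop_reference_nhwc(A_nhwc, B_krsc, stride=(1, 1), padding=(0, 0)):
    if not torch_available:
        return None
    A_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))          # N H W C -> N C H W
    W_nchw = torch.permute(B_krsc, (0, 3, 1, 2))          # K R S C -> K C R S
    D_nchw = torch.nn.functional.conv2d(A_nchw, W_nchw, stride=stride, padding=padding)
    return torch.permute(D_nchw, (0, 2, 3, 1)).flatten()  # back to NHWC, flattened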
|
from pycutlass.utils.reference_model import *
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for converting between frontend datatypes and CUTLASS datatypes
"""
from typing import Union, Tuple
import cutlass
import pycutlass.library as library
try:
import numpy as np
numpy_available = True
except ImportError:
numpy_available = False
def numpy_to_cutlass(inp):
if numpy_available:
if inp == np.float16:
return cutlass.float16
elif inp == np.float32:
return cutlass.float32
elif inp == np.float64:
return cutlass.float64
elif inp == np.int8:
return cutlass.int8
elif inp == np.int32:
return cutlass.int32
return None
try:
import cupy as cp
cupy_available = True
cupy_to_cutlass_dict = {
cp.float16: cutlass.float16,
cp.float32: cutlass.float32,
cp.float64: cutlass.float64
}
except ImportError:
cupy_available = False
def cupy_to_cutlass(inp):
if cupy_available:
if inp == cp.float16:
return cutlass.float16
elif inp == cp.float32:
return cutlass.float32
elif inp == cp.float64:
return cutlass.float64
return None
try:
import torch
torch_available = True
torch_to_cutlass_dict = {
torch.half: cutlass.float16,
torch.float16: cutlass.float16,
torch.float: cutlass.float32,
torch.float32: cutlass.float32,
torch.double: cutlass.float64,
torch.float64: cutlass.float64
}
except ImportError:
torch_available = False
def torch_to_cutlass(inp):
if torch_available:
return torch_to_cutlass_dict.get(inp, None)
try:
import bfloat16
bfloat16_available = True
except ImportError:
bfloat16_available = False
def bfloat16_to_cutlass(inp):
if bfloat16_available:
if inp == bfloat16.bfloat16:
return cutlass.bfloat16
def to_cutlass(inp):
for cvt_fn in [bfloat16_to_cutlass, cupy_to_cutlass, numpy_to_cutlass, torch_to_cutlass]:
out = cvt_fn(inp)
if out is not None:
return out
raise Exception('No available conversion from type {} to a CUTLASS type.'.format(inp))
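# ------------------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): `to_cutlass` tries each frontend converter
# in turn and returns the first match. A minimal self-check, assuming numpy is installed:
def _example_to_cutlass():
    if not numpy_available:
        return
    assert numpy_to_cutlass(np.float32) == cutlass.float32
    assert to_cutlass(np.float16) == cutlass.float16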
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for stamping out collective mainloops for SM90 kernels
"""
import cute
import cutlass
from pycutlass import SubstituteTemplate
import pycutlass.library as library
tma_alignment_bytes = 16
cp_async_min_alignment_bytes = 4
class RowColMajorToGMMAMajor:
@staticmethod
def A(layout, element):
"""
Converts operand A's layout from row/column major format into CuTe's GMMA major format
:param layout: layout of the A operand
:type layout: cutlass.RowMajor or cutlass.ColumnMajor
:param element: data type of the A operand
:return: C++ CuTe GMMA major format
:rtype: cute.GMMAMajor
"""
type_requires_k_major = (element == cutlass.tfloat32) or (element == cutlass.int8)
if layout == cutlass.ColumnMajor and not type_requires_k_major:
return cute.GMMAMajor.MN
else:
return cute.GMMAMajor.K
@staticmethod
def B(layout, element):
"""
Converts operand B's layout from row/column major format into CuTe's GMMA major format
:param layout: layout of the B operand
:type layout: cutlass.RowMajor or cutlass.ColumnMajor
:param element: data type of the B operand
:return: C++ CuTe GMMA major format
:rtype: cute.GMMAMajor
"""
type_requires_k_major = (element == cutlass.tfloat32) or (element == cutlass.int8)
if layout == cutlass.RowMajor and not type_requires_k_major:
return cute.GMMAMajor.MN
else:
return cute.GMMAMajor.K
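# Hedged example (not part of the original file): fp16 operands may use MN-major GMMA layouts when
# their canonical layout allows it, while tf32/int8 always fall back to K-major, e.g.:
#   RowColMajorToGMMAMajor.A(cutlass.ColumnMajor, cutlass.float16)  -> cute.GMMAMajor.MN
#   RowColMajorToGMMAMajor.A(cutlass.ColumnMajor, cutlass.tfloat32) -> cute.GMMAMajor.K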
def cluster_shape_to_tma(dim):
"""
Returns the TMA copy type for a given cluster dimension
:param dim: a given dimension of a cluster
    :type dim: int
    :return: C++ TMA copy type
:rtype: str
"""
return 'cute::SM90_TMA_LOAD' if dim == 1 else 'cute::SM90_TMA_LOAD_MULTICAST'
def make_cpasync_gmem_tiled_copy(thread_count, element, alignment, gmma_layout, dim_mn, dim_k):
"""
Returns a `make_tiled_copy` call for a given configuration
:param thread_count: number of threads in the threadblock
:type thread_count: int
:param element: datatype of the operand in question
:param alignment: byte alignment of the operand in question
:type alignment: int
:param gmma_layout: GMMA layout of the operand in question
:type gmma_layout: cute.GMMAMajor
:param dim_mn: extent of the M/N dimension of the tile
:type dim_mn: int
:param dim_k: extent of the reduction dimension of the tile
:type dim_k: int
:return: C++ call to `make_tiled_copy`
:rtype: str
"""
emission_str = """decltype(cute::make_tiled_copy(
cute::Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint_byte_t<static_cast<int>(sizeof(${element})) * ${alignment}>>, ${element}>{},
cute::Layout<cute::Shape<_${shape0_x}, _${shape0_y}>,
cute::Stride<_${stride_x}, _${stride_y}>>{},
cute::Layout<cute::Shape<_${shape1_x}, _${shape1_y}>>{}))"""
if gmma_layout == cute.GMMAMajor.K:
threads_major = dim_k // alignment
threads_minor = thread_count // threads_major
values = {
'shape0_x': str(threads_minor),
'shape0_y': str(threads_major),
'stride_x': str(threads_major),
'stride_y': '1',
'shape1_x': '1',
'shape1_y': str(alignment)
}
elif gmma_layout == cute.GMMAMajor.MN:
threads_major = dim_mn // alignment
threads_minor = thread_count // threads_major
values = {
'shape0_x': str(threads_major),
'shape0_y': str(threads_minor),
'stride_x': '1',
'stride_y': str(threads_major),
'shape1_x': str(alignment),
'shape1_y': '1'
}
else:
raise Exception('Unexpected GMMA layout {}'.format(gmma_layout))
# Add common values
values['element'] = library.DataTypeTag[element]
values['alignment'] = str(alignment)
return SubstituteTemplate(emission_str, values)
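# ------------------------------------------------------------------------------------------------
# Hedged worked example (not part of the original file): for a K-major operand with 128 threads,
# an alignment of 8 elements, and a K tile of 64, the emitted copy uses a 16x8 thread layout
# (threads_major = 64 // 8 = 8 along K, threads_minor = 128 // 8 = 16 along M/N), each thread
# moving 1x8 elements. The call below only builds the C++ string; the dtype choice is illustrative.
def _example_cpasync_copy_string():
    return make_cpasync_gmem_tiled_copy(128, cutlass.float16, 8, cute.GMMAMajor.K, 64, 64)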
def max_stages(op, arch):
"""
    Returns the maximum number of pipeline stages that can be used for an operation.
:param op: operation for which the maximum stages should be computed. If stages are
set via the `op.tile_description.stages` parameter, this setting is ignored
in the present calculation
:type op: pycutlass.GemmOperation
:param arch: compute capability of the device on which the operation will be run
:type arch: int
:return: maximum number of pipeline stages that can be used for an operation
:rtype: int
"""
smem_per_stage = library.CalculateSmemUsagePerStage(op)
smem_capacity = library.SharedMemPerCC[arch]
return int(smem_capacity // smem_per_stage)
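# Hedged worked example (not part of the original file), with illustrative numbers only: a kernel
# using 24 KiB of shared memory per stage on a device exposing 192 KiB would get
# 192 * 1024 // (24 * 1024) == 8 stages. The real values come from pycutlass.library.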
class LayoutToStride:
_variable_first = 'cute::Stride<int64_t, cute::Int<1>, int64_t>'
_variable_last = 'cute::Stride<cute::Int<1>, int64_t, int64_t>'
@staticmethod
def A(layout):
"""
        Returns the CuTe stride type corresponding to the layout of operand A
        :param layout: layout of the A operand
:type layout: cutlass.RowMajor or cutlass.ColumnMajor
:return: C++ declaration of CuTe stride
:rtype: str
"""
if layout == cutlass.RowMajor:
return LayoutToStride._variable_first
elif layout == cutlass.ColumnMajor:
return LayoutToStride._variable_last
else:
raise Exception('Unsupported layout {}'.format(layout))
@staticmethod
def B(layout):
"""
        Returns the CuTe stride type corresponding to the layout of operand B
:param layout: layout of the B operand
:type layout: cutlass.RowMajor or cutlass.ColumnMajor
:return: C++ declaration of CuTe stride
:rtype: str
"""
if layout == cutlass.RowMajor:
return LayoutToStride._variable_last
elif layout == cutlass.ColumnMajor:
return LayoutToStride._variable_first
else:
raise Exception('Unsupported layout {}'.format(layout))
EMISSION_STR = """
using TileShape_MNK = cute::Shape<_${threadblock_shape_m}, _${threadblock_shape_n}, _${threadblock_shape_k}>;
using ClusterShape_MNK = cute::Shape<_${cluster_shape_m}, _${cluster_shape_n}, _${cluster_shape_k}>;
using TiledMma = decltype(cute::make_tiled_mma(cute::GMMA::ss_op_selector<
${internal_element_A}, ${internal_element_B}, ${element_accumulator}, TileShape_MNK, ${gmma_layout_A}, ${gmma_layout_B}>()));
using SmemLayoutAtomA = decltype(cute::GMMA::smem_selector<${gmma_layout_A}, ${internal_element_A}, _${threadblock_shape_m}, _${threadblock_shape_k}>());
using SmemLayoutAtomB = decltype(cute::GMMA::smem_selector<${gmma_layout_B}, ${internal_element_B}, _${threadblock_shape_n}, _${threadblock_shape_k}>());
using CollectiveOp = typename cutlass::gemm::collective::CollectiveMma<
${mainloop_type}<${stage_count}, ClusterShape_MNK${kernel_schedule}>,
TileShape_MNK,
${element_A},
${stride_A},
${element_B},
${stride_B},
TiledMma,
${gmem_tiled_copy_A},
SmemLayoutAtomA,
void, // GMMA_SS does not need an SmemCopyAtom
${transform_A},
${gmem_tiled_copy_B},
SmemLayoutAtomB,
void, // GMMA_SS does not need an SmemCopyAtom
${transform_B}
>;
"""
def internal_element(element):
"""
Returns the data type internally used for `element`.
:param element: data type
:return: data type used internally
"""
return cutlass.tfloat32 if element == cutlass.float32 else element
def common_values(op, stage_count, transform_A, transform_B):
"""
Returns a dictionary containing common values to be substituted in the emission of the
collective operation declaration. Values specific to a particular collective operation
should be added to these.
:param op: GEMM operation for which to build a collective operation
:type op: pycutlass.GemmOperation
:param stage_count: number of pipeline stages to use in the operation
:type stage_count: int
:param transform_A: transformation to perform on the A operand
:type transform_A: str
:param transform_B: transformation to perform on the B operand
:type transform_B: str
:return: dictionary containing values to substitute in emission string
:rtype: dict
"""
internal_element_a = internal_element(op.A.element)
internal_element_b = internal_element(op.B.element)
return {
'threadblock_shape_m': str(op.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(op.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(op.tile_description.threadblock_shape[2]),
'cluster_shape_m': str(op.tile_description.cluster_shape[0]),
'cluster_shape_n': str(op.tile_description.cluster_shape[1]),
'cluster_shape_k': str(op.tile_description.cluster_shape[2]),
'element_A': library.DataTypeTag[op.A.element],
'element_B': library.DataTypeTag[op.B.element],
'internal_element_A': library.DataTypeTag[internal_element_a],
'internal_element_B': library.DataTypeTag[internal_element_b],
'element_accumulator': library.DataTypeTag[op.accumulator_type()],
'gmma_layout_A': library.CuTeLayoutTag[RowColMajorToGMMAMajor.A(op.A.layout, internal_element_a)],
'gmma_layout_B': library.CuTeLayoutTag[RowColMajorToGMMAMajor.B(op.B.layout, internal_element_b)],
'stride_A': LayoutToStride.A(op.A.layout),
'stride_B': LayoutToStride.B(op.B.layout),
'stage_count': str(stage_count),
'transform_A': transform_A,
'transform_B': transform_B
}
def build_gmma_tma(op):
"""
Builds a collective operation declaration targeting TMA GMMA kernels
:param op: GEMM operation for which to build a collective operation
:type op: pycutlass.GemmOperation
:return: string containing the C++ declaration of collective operation
:rtype: str
"""
A_tma_aligned = (library.DataTypeSizeBytes[op.A.element] * op.A.alignment) % tma_alignment_bytes == 0
B_tma_aligned = (library.DataTypeSizeBytes[op.B.element] * op.B.alignment) % tma_alignment_bytes == 0
if not A_tma_aligned or not B_tma_aligned:
        raise Exception('Both the A and B operands must be aligned to {} bytes to use TMA'.format(tma_alignment_bytes))
max_stage_count = max_stages(op, arch=90)
if op.tile_description.stages is None:
op.tile_description.stages = max_stage_count
elif op.tile_description.stages > max_stage_count:
raise Exception('Combination of threadblock shape, data types, and number of stages exceeds shared memory capacity.')
kernel_schedule = 'cutlass::gemm::KernelTmaWarpSpecialized'
if op.tile_description.persistent:
kernel_schedule = 'cutlass::gemm::KernelTmaWarpSpecializedPersistent'
transform_A = 'cute::identity'
transform_B = 'cute::identity'
values = common_values(op, op.tile_description.stages, transform_A, transform_B)
specific_values = {
'mainloop_type': 'cutlass::gemm::MainloopSm90TmaGmmaWarpSpecialized',
'kernel_schedule': ', ' + kernel_schedule,
'gmem_tiled_copy_A': cluster_shape_to_tma(op.tile_description.cluster_shape[1]),
'gmem_tiled_copy_B': cluster_shape_to_tma(op.tile_description.cluster_shape[0])
}
values.update(specific_values)
return SubstituteTemplate(EMISSION_STR, values)
def build_gmma_cpasync(op):
"""
Builds a collective operation declaration targeting cp.async GMMA kernels
:param op: GEMM operation for which to build a collective operation
:type op: pycutlass.GemmOperation
:return: string containing the C++ declaration of collective operation
:rtype: str
"""
A_cp_async_aligned = (library.DataTypeSizeBytes[op.A.element] * op.A.alignment) % cp_async_min_alignment_bytes == 0
B_cp_async_aligned = (library.DataTypeSizeBytes[op.B.element] * op.B.alignment) % cp_async_min_alignment_bytes == 0
if not A_cp_async_aligned or not B_cp_async_aligned:
        raise Exception('Both the A and B operands must be aligned to {} bytes to use cp.async'.format(cp_async_min_alignment_bytes))
max_stage_count = max_stages(op, arch=90)
if op.tile_description.stages is None:
op.tile_description.stages = max_stage_count
elif op.tile_description.stages > max_stage_count:
raise Exception('Combination of threadblock shape, data types, and number of stages exceeds shared memory capacity.')
transform_A = 'cute::identity'
transform_B = 'cute::identity'
thread_count = 128
cpasync_copy_A = make_cpasync_gmem_tiled_copy(thread_count, op.A.element, op.A.alignment, RowColMajorToGMMAMajor.A(op.A.layout, op.A.element),
op.tile_description.threadblock_shape[0], op.tile_description.threadblock_shape[2])
cpasync_copy_B = make_cpasync_gmem_tiled_copy(thread_count, op.B.element, op.B.alignment, RowColMajorToGMMAMajor.B(op.B.layout, op.B.element),
op.tile_description.threadblock_shape[1], op.tile_description.threadblock_shape[2])
values = common_values(op, op.tile_description.stages, transform_A, transform_B)
specific_values = {
'mainloop_type': 'cutlass::gemm::MainloopSm90CpAsyncGmma',
'kernel_schedule': '',
'gmem_tiled_copy_A': cpasync_copy_A,
'gmem_tiled_copy_B': cpasync_copy_B
}
values.update(specific_values)
return SubstituteTemplate(EMISSION_STR, values)
def build(operation):
"""
Builds a collective operation declaration targeting cp.async or TMA for GMMA kernels
:param operation: GEMM operation for which to build a collective operation
:type operation: pycutlass.GemmOperation
:return: string containing the C++ declaration of collective operation
:rtype: str
"""
A_tma_aligned = (library.DataTypeSizeBytes[operation.A.element] * operation.A.alignment) % tma_alignment_bytes == 0
B_tma_aligned = (library.DataTypeSizeBytes[operation.B.element] * operation.B.alignment) % tma_alignment_bytes == 0
tma_correct_size = (library.DataTypeSizeBytes[operation.A.element] == 2 and library.DataTypeSizeBytes[operation.B.element] == 2)
tma_correct_layout = (operation.A.layout == cutlass.RowMajor or operation.B.layout == cutlass.ColumnMajor)
if A_tma_aligned and B_tma_aligned and (tma_correct_size or tma_correct_layout):
return build_gmma_tma(operation)
else:
return build_gmma_cpasync(operation)
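# ------------------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): given a configured pycutlass GemmOperation
# `op`, `build(op)` returns the C++ collective-mainloop declaration as a string, choosing the TMA
# path when both operands pass the 16-byte alignment check (plus the size/layout checks above) and
# falling back to cp.async otherwise.
def _example_build_collective(op):
    a_bytes = library.DataTypeSizeBytes[op.A.element] * op.A.alignment
    a_tma_ok = (a_bytes % tma_alignment_bytes == 0)   # same check build() applies to A (and to B)
    return build(op), a_tma_ok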
|
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# this file creates the test/unit/gemm/device simt tests
outputDir = ""
################################################################################
# parameters
# Edge - for tiles, the edges represent the length of one side
# Ratio - the maximum ratio between 2 edges, limits the skinnyness of tiles
# MaxEdge - maximum length of each edge
# Min/Max - minimum/maximum of the product of edge lengths
################################################################################
warpsPerThreadblockEdge = [1, 2, 4, 8, 16]
warpsPerThreadblockRatio = 2
warpsPerThreadblockMax = 16
# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases
warpShapeEdges = [8, 16, 32, 64, 128, 256]
warpShapeRatio = 4
warpShapeMax = 64*64
warpShapeMin = 8*8
threadblockEdgeMax = 256
# char, type bits/elem, max tile, L0 threadblock tiles
precisions = [
["c", "cutlass::complex<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["q", "cutlass::Quaternion<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["d", "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ],
["h", "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ],
["i", "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ],
["s", "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ],
["z", "cutlass::complex<double>", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ],
]
# L1 will have a single kernel for every unique shape
# L2 will have everything else
transposes = [
[False, False],
[False, True],
[True, False],
[True, True]
]
################################################################################
# warps per threadblock
################################################################################
warpsPerThreadblocks = []
for warpsPerThreadblock0 in warpsPerThreadblockEdge:
for warpsPerThreadblock1 in warpsPerThreadblockEdge:
if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax:
warpsPerThreadblocks.append([warpsPerThreadblock0,
warpsPerThreadblock1])
print("WarpsPerThreadblocks",warpsPerThreadblocks)
################################################################################
# warp shapes
################################################################################
warpNumThreads = 32
warpShapes = []
for warp0 in warpShapeEdges:
for warp1 in warpShapeEdges:
if warp0 / warp1 <= warpShapeRatio and warp1 / warp0 <= warpShapeRatio and warp0*warp1 <= warpShapeMax and warp0*warp1 > warpShapeMin:
warpShapes.append([warp0, warp1])
print("WarpShapes", warpShapes)
numL0 = 0
numL1 = 0
numL2 = 0
################################################################################
# create kernels
# create a file for each precision/transpose
# each file contains many tile sizes
################################################################################
# precisions
for precision in precisions:
# get precision char
precisionChar = precision[0]
precisionType = precision[1]
precisionBits = precision[2]
threadblockMaxElements = precision[3]
threadblockTilesL0 = precision[4]
# transposes
for transpose in transposes:
# get transpose char
columnMajorA = transpose[0]
columnMajorB = transpose[1]
transCharA = "n" if columnMajorA else "t"
transCharB = "n" if columnMajorB else "t"
# open file
fileName="simt_%sgemm_%s%s_sm50.cu" % (precisionChar, transCharA, transCharB)
print("\n", fileName)
filePath = "%s%s" % (outputDir, fileName)
out = open(filePath, "w+")
# write file header
out.write("/***************************************************************************************************\n"
" * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. \n"
" * SPDX-License-Identifier: BSD-3-Clause \n"
" * \n"
" * Redistribution and use in source and binary forms, with or without \n"
" * modification, are permitted provided that the following conditions are met: \n"
" * \n"
" * 1. Redistributions of source code must retain the above copyright notice, this \n"
" * list of conditions and the following disclaimer. \n"
" * \n"
" * 2. Redistributions in binary form must reproduce the above copyright notice, \n"
" * this list of conditions and the following disclaimer in the documentation \n"
" * and/or other materials provided with the distribution. \n"
" * \n"
" * 3. Neither the name of the copyright holder nor the names of its \n"
" * contributors may be used to endorse or promote products derived from \n"
" * this software without specific prior written permission. \n"
" * \n"
" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n"
" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n"
" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n"
" * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n"
" * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n"
" * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n"
" * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \n"
" * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n"
" * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n"
" * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n"
" *\n"
" **************************************************************************************************/\n"
"/*! \\file\n"
" \\brief Tests for device-wide GEMM interface\n"
"*/\n"
"\n"
"#include <iostream>\n"
"\n"
"#include \"cutlass/cutlass.h\"\n"
"#include \"cutlass/gemm/device/gemm.h\"\n"
"#include \"cutlass/numeric_types.h\"\n"
"\n"
"#include \"../../common/cutlass_unit_test.h\"\n"
"\n"
"#include \"cutlass/util/host_tensor.h\"\n"
"#include \"cutlass/util/tensor_view_io.h\"\n"
"#include \"cutlass/util/reference/host/tensor_fill.h\"\n"
"#include \"cutlass/util/reference/host/tensor_copy.h\"\n"
"#include \"cutlass/util/reference/host/tensor_compare.h\"\n"
"#include \"cutlass/util/reference/host/gemm.h\"\n"
"\n"
"#include \"testbed.h\"\n"
"\n")
foundThreadblockTilesL0 = {}
foundThreadblockTilesL1 = {}
########################################################################
# for each combination of tile sizes
########################################################################
for warpsPerThreadblock in warpsPerThreadblocks:
for warpShape in warpShapes:
warpThreadsM = 0
if warpShape[0] > warpShape[1]:
warpThreadsM = 8
else:
warpThreadsM = 4
                warpThreadsN = warpNumThreads // warpThreadsM
# skip shapes with conflicting rectangularity
# they are unlikely to be fastest
blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1]
blockL = warpsPerThreadblock[0] < warpsPerThreadblock[1]
warpG = warpShape[0] > warpShape[1]
warpL = warpShape[0] < warpShape[1]
blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2
blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1]
warpG2 = warpShape[0] > warpShape[1]*2
warpL2 = warpShape[0]*2 < warpShape[1]
if blockG2 and warpL: continue
if blockL2 and warpG: continue
if warpG2 and blockL: continue
if warpL2 and blockG: continue
# check threadblock ratios and max
threadblockTile = [warpShape[0]*warpsPerThreadblock[0],
warpShape[1]*warpsPerThreadblock[1]]
if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue
if threadblockTile[0] > threadblockEdgeMax: continue
if threadblockTile[1] > threadblockEdgeMax: continue
totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1]
# calculate unroll
                # ensure that every iteration performs at least a full load of A and B
                unrollMin = 8
                unrollMin0 = totalThreads // threadblockTile[0]
                unrollMin1 = totalThreads // threadblockTile[1]
                unroll = max(unrollMin, unrollMin0, unrollMin1)
                threadTileM = warpShape[0] // warpThreadsM
                threadTileN = warpShape[1] // warpThreadsN
if threadTileM < 2 or threadTileN < 2: continue
if threadTileM*threadTileN*precisionBits > 8*8*32: continue
                # the epilogue does not support threadblock N smaller than warpNumThreads, so skip those tiles
if threadblockTile[1] < warpNumThreads: continue
# limit smem
smemBitsA = threadblockTile[0]*unroll*2*precisionBits
smemBitsB = threadblockTile[1]*unroll*2*precisionBits
smemKBytes = (smemBitsA+smemBitsB)/8/1024
if (smemKBytes > 48): continue
# test level 0
testLevel = -1
for tileId in range(0, len(threadblockTilesL0)):
tbTile = threadblockTilesL0[tileId]
if tbTile[0] == threadblockTile[0] and tbTile[1] == threadblockTile[1]:
if tuple(tbTile) not in foundThreadblockTilesL0:
testLevel = 0
numL0 += 1
foundThreadblockTilesL0[tuple(tbTile)] = True
# test level 1
if testLevel < 0:
threadblockTileAlreadyUsed = False
if tuple(threadblockTile) not in foundThreadblockTilesL1:
testLevel = 1
numL1 += 1
foundThreadblockTilesL1[tuple(threadblockTile)] = True
# test level 2
if testLevel < 0:
testLevel = 2
numL2 += 1
################################################################
# write this tile to file
################################################################
print("%ix%ix%i__%ix%i_%ix%i_%ix%i L%i" % (
threadblockTile[0], threadblockTile[1], unroll,
threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1], testLevel))
out.write("////////////////////////////////////////////////////////////////////////////////\n"
"// Elements / Thread: %3i x %3i\n"
"// Threads / Warp: %3i x %3i\n"
"// Warps / Block: %3i x %3i\n"
"// Threadblock: %3i x %3i x %2i\n"
% ( threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1],
threadblockTile[0], threadblockTile[1], unroll
)
)
out.write("CUTLASS_TEST_L%i(SM50_device_%sgemm_%s%s, %ix%ix%i_%ix%ix1_%ix%i_%ix%i_%ix%i, {\n" % (
testLevel,
precisionChar,
transCharA,
transCharB,
threadblockTile[0],
threadblockTile[1],
unroll,
warpShape[0],
warpShape[1],
threadTileM,
threadTileN,
warpThreadsM,
warpThreadsN,
warpsPerThreadblock[0],
warpsPerThreadblock[1]
))
out.write(" using precision = %s;\n" % precisionType)
out.write(" using ThreadblockShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n" % (
threadblockTile[0],
threadblockTile[1],
unroll))
out.write(" using WarpShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n\n" % (
warpShape[0],
warpShape[1],
unroll))
out.write(" static int const kEpilogueElementsPerAccess = 1;\n"
" using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;\n"
" using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<\n"
" precision, kEpilogueElementsPerAccess, precision, precision>;\n\n")
out.write(" using Gemm = cutlass::gemm::device::Gemm<\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::RowMajor,\n"
" precision,\n"
" cutlass::arch::OpClassSimt,\n"
" cutlass::arch::Sm50,\n"
" ThreadblockShape, WarpShape, InstructionShape,\n"
" EpilogueOutputOp,\n"
" cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,\n"
" 2 // Stages\n"
" >;\n" % (
"Column" if columnMajorA else "Row",
"Column" if columnMajorB else "Row",
))
out.write(" EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());\n"
"} )\n\n")
out.close()
print("NumKernels:", numL0, numL1, numL2)
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_turing_and_volta as api_generator
import gen_sample as sample_creater
import gen_cmake as cmake_creater
import gen_verify as verify_creater
import gen_device as b2b_fused_generator
import replace_fix_impl_header
import argparse
import os
import json
parser = argparse.ArgumentParser(description="Generates Fused Multi-GEMM CUTLASS Kernels")
parser.add_argument("--config-file", default="config.json", help="JSON file containing configuration to generate")
parser.add_argument("--gen-name", default="FusedMultiGemmForward", help="Specific the output name")
parser.add_argument("--output-dir", default="", help="Specifies the output dir")
parser.add_argument("--cutlass-dir", default="", help="Specifies the dependent CUTLASS repo dir")
parser.add_argument("--gen-include-cutlass-dir", default="", help="Specifies the generated CUTLASS code include dir, if needed.")
args = parser.parse_args()
gen_name = args.gen_name
cutlass_deps_dir = args.cutlass_dir
output_dir = args.output_dir
output_dir += "/"
cutlass_deps_root = args.gen_include_cutlass_dir
if cutlass_deps_root == '':
cutlass_deps_root = cutlass_deps_dir + "/include/"
cutlass_deps_root +='/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir + "/" + "auto_gen"):
os.mkdir(output_dir + "/" + "auto_gen")
if not os.path.exists(output_dir + "/" + "fixed_impl"):
os.mkdir(output_dir + "/" + "fixed_impl" )
if not os.path.exists(output_dir + "/" + "sample"):
os.mkdir(output_dir + "/" + "sample" )
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "device"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "device")
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "kernel"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "kernel")
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "threadblock"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "threadblock")
with open(args.config_file, 'r') as infile:
gemm_info_dict = json.load(infile)
keys = sorted(gemm_info_dict.keys())
fuse_gemm_info = [gemm_info_dict[k] for k in keys]
for_cutlass_gen_user_include_header_file = [
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination_leaky_relu.h",
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination.h",
]
for_fused_wrapper = [
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination_leaky_relu.h",
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination.h",
"auto_gen/device/" + gen_name + ".h",
cutlass_deps_root + "cutlass/gemm/device/gemm_batched.h",
cutlass_deps_root + "cutlass/cutlass.h",
]
# Copy fixed implementation to the output directory
fix_impl = replace_fix_impl_header.replace_fix_impl("../fixed_impl/", output_dir +"/fixed_impl/", cutlass_deps_root)
fix_impl.gen_code()
auto_gen_output_dir = output_dir + "/auto_gen/"
project_root = ""
turing_plus = b2b_fused_generator.gen_device(fuse_gemm_info, gen_name, for_cutlass_gen_user_include_header_file, cutlass_deps_root, project_root, auto_gen_output_dir)
turing_plus.gen_code(75, 'hmma1688', False)
api = api_generator.gen_one_API(fuse_gemm_info, gen_name, for_fused_wrapper, output_dir)
api.gen_code()
# Generate C++ sample
os.system("cp ../leaky_bias.h " + output_dir + "/sample/")
os.system("cp ../utils.h " + output_dir + "/sample/")
sample_dir = output_dir + "/sample/"
sample = sample_creater.gen_test(fuse_gemm_info, gen_name, for_cutlass_gen_user_include_header_file, sample_dir)
sample.gen_cpp_sample()
cmake_gen = cmake_creater.gen_build_sys(cutlass_deps_dir, output_dir)
cmake_gen.gen_code()
verify = verify_creater.gen_verify(fuse_gemm_info, gen_name, for_fused_wrapper, output_dir)
verify.gen_code()
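# Hedged usage note (not part of the original file): an example invocation using the flags defined
# above (script name and paths are illustrative):
#   python3 gen_all.py --config-file config.json --output-dir ./out --cutlass-dir /path/to/cutlass
# which populates ./out/auto_gen, ./out/fixed_impl and ./out/sample, and emits the CMake build and
# verification code alongside the generated kernels.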
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from typing import *
import helper
import gen_ir
import gen_kernel as gen_ker
class gen_device:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, cutlass_deps_root, project_root, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.raw_gemm_info = fuse_gemm_info
self.b2b_num = len(fuse_gemm_info)
self.user_header_file = user_header_file
self.args = {}
        # device arg struct member
self.arg_member = []
self.gen_class_name = gen_class_name
self.gen_kernel_name = gen_class_name + "Kernel"
self.tempalte_args = []
self.__tempalate_arg_list = {'Stages': int, 'SplitKSerial': bool, 'IsBetaZero': bool, 'AlignmentA': int, 'AlignmentB': int}
self.file_name = output_dir + "/device/" +gen_class_name +".h"
self.sample_dir = output_dir
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.this_file_root = output_dir + "/device/"
self.first_use_1stage = False
## gen kernel
self.gen_kernel = gen_ker.gen_kernel(self.tempalte_args, self.gen_class_name, self.b2b_num, output_dir, cutlass_deps_root, project_root)
def __check_arg_type(self, temp_arg):
if temp_arg in self.__tempalate_arg_list.keys():
return self.__tempalate_arg_list[temp_arg]
find_sub = False
for candidate_arg in self.__tempalate_arg_list.keys():
if (temp_arg.find(candidate_arg) != -1):
return self.__tempalate_arg_list[candidate_arg]
return 'typename'
# def gen_B2b2bGemm_class():
def set_arch(self, sm_cap, mma_tp):
if sm_cap == 75 or sm_cap == 80 or sm_cap == 86:
self.arch = "cutlass::arch::Sm" + str(sm_cap)
        if mma_tp == 'hmma1688':
self.mma_shape = [16, 8, 8]
self.mma_tp = 'hmma'
        elif mma_tp == 'imma8816':
self.mma_tp = 'imma'
self.mma_shape = [8, 8, 16]
else:
return 0
def gen_include_header(self):
code = '''\
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_root}cutlass/cutlass.h\"
#include \"{cutlass_root}cutlass/numeric_types.h\"
#include \"{cutlass_root}cutlass/arch/arch.h\"
#include \"{cutlass_root}cutlass/device_kernel.h\"
#include \"{cutlass_root}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_root}cutlass/gemm/device/default_gemm_configuration.h\"
#include \"{cutlass_root}cutlass/epilogue/thread/linear_combination_relu.h\"
#include \"{cutlass_root}cutlass/epilogue/thread/linear_combination.h\"
#include \"{project_root}../kernel/b2b_gemm.h\"
#include \"{project_root}../kernel/default_b2b_gemm.h\"
'''.format(cutlass_root=self.cutlass_deps_root, project_root=self.project_root, this_file_root=self.this_file_root)
include_user_header = ""
for header in self.user_header_file:
include_user_header += "#include \"" + header + "\"\n"
return code + include_user_header
def gen_code(self, sm_cap, mma_tp, ifprint = True):
self.set_arch(sm_cap, mma_tp)
self.update_b2b_args()
print(self.fuse_gemm_info)
self.update_b2b_class_template_args()
func_code = self.gen_all_func()
member_var_code = "private:\n typename B2bGemmKernel::Params params_;\n"
gen_code = gen_ir.gen_template_class(self.gen_class_name, self.tempalte_args, func_code + member_var_code)
code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("device", gen_code)))
if ifprint:
print(code)
print("[INFO]: Gen device code output Dir: is ", self.file_name)
with open(self.file_name, 'w+') as f:
f.write(code)
gen_kernel = self.gen_kernel.gen_code(self.first_use_1stage)
print(gen_kernel)
def update_b2b_class_template_args(self):
for arg in self.args.keys():
self.tempalte_args.append([self.__check_arg_type(arg), arg, self.args[arg]])
def update_b2b_args(self):
self.args['ElementA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_tp'])
self.args['LayoutA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_format'])
cnt = 0
warp_M_tile = 32
        # Determine maximum N_tile
Max_Ntile = 0
for layer in self.fuse_gemm_info:
n_tile = layer['mnk'][1]
if n_tile > Max_Ntile:
Max_Ntile = n_tile
if Max_Ntile >= 256:
warp_M_tile = 16
stages_temp = []
for layer in self.fuse_gemm_info:
cnt_str = str(cnt)
B_tp_str= 'ElementB' + cnt_str
B_format_str = 'LayoutB' + cnt_str
C_tp_str= 'ElementC' + cnt_str
C_format_str = 'LayoutC' + cnt_str
Acc_str = 'ElementAccumulator' + cnt_str
self.args[B_tp_str] = helper.type_2_cutlass_type(layer['B_tp'])
self.args[B_format_str] = helper.type_2_cutlass_type(layer['B_format'])
self.args[C_tp_str] = helper.type_2_cutlass_type(layer['C_tp'])
self.args[C_format_str] = helper.type_2_cutlass_type(layer['C_format'])
self.args[Acc_str] = helper.type_2_cutlass_type(layer['Acc_tp'])
mnk = layer['mnk'][:]
tile_mnk = mnk[:]
            tile_mnk[2] = 32  # force the K tile to be 32
#N tile gen
if mnk[1] > 1024:
assert(0)
elif mnk[1] > 512:
tile_mnk[1] = 1024
elif mnk[1] > 256:
tile_mnk[1] = 512
elif mnk[1] > 128:
tile_mnk[1] = 256
elif mnk[1] > 64:
tile_mnk[1] = 128
elif mnk[1] > 32:
tile_mnk[1] = 64
else :
tile_mnk[1] = 32
if tile_mnk[1] == 512:
stages_temp.append(1)
else:
stages_temp.append(2)
tile_mnk[0] = 4 * warp_M_tile
epilogue_setted_type = helper.get_epilogue_tp(layer)
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
epilogue_str = 'EpilogueOutputOp' + cnt_str
if cnt != len(self.fuse_gemm_info) - 1:
n = layer['mnk'][1]
Fragments = tile_mnk[1] // 8 * 2
self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name + "<ElementC0_, " + str(Fragments) +", ElementAccumulator0_, ElementAccumulator0_>"
else:
n = layer['mnk'][1]
                n_mod_8 = n % 8  # epilogue alignment is chosen from N mod 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<ElementC0_, " + str(N_align_elements) + ", ElementAccumulator0_, ElementAccumulator0_>"
ThreadBlockShape_str = 'ThreadblockShape' + cnt_str
self.args[ThreadBlockShape_str] = helper.cvt_2_cutlass_shape(tile_mnk)
WarpShape_str = 'WarpShape' + cnt_str
tile_mnk[0] = warp_M_tile
self.args[WarpShape_str] = helper.cvt_2_cutlass_shape(tile_mnk)
cnt += 1
self.args['ElementD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_tp'])
self.args['LayoutD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_format'])
self.args['InstructionShape'] = helper.cvt_2_cutlass_shape(self.mma_shape)
self.args['OperatorClass'] = 'arch::OpClassTensorOp'
self.args['ArchTag'] = self.arch
self.args['ThreadblockSwizzle'] = 'threadblock::GemmBatchedIdentityThreadblockSwizzle'
for i in range(self.b2b_num):
self.args[helper.var_idx('Stages', i)] = "2"
self.args['AlignmentA'] = str(8)
self.args['AlignmentB'] = str(8)
self.args['SplitKSerial'] = 'false'
self.args['Operator'] = 'typename DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB0_, ElementC0_, ElementAccumulator0_>::Operator'
self.args['IsBetaZero'] = 'false'
def gen_using_kernel(self):
code = "using B2bGemmKernel = typename kernel::DefaultB2bGemm<\n"
code += " " + "ElementA,\n"
code += " " + "LayoutA,\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementB", i) + ",\n"
code += " " + helper.var_idx("LayoutB", i) + ",\n"
code += " " + helper.var_idx("ElementC", i) + ",\n"
code += " " + helper.var_idx("LayoutC", i) + ",\n"
code += " " + helper.var_idx("ElementAccumulator", i) + ",\n"
code += " " + helper.var_idx("EpilogueOutputOp", i) + ",\n"
code += " " + helper.var_idx("ThreadblockShape", i) + ",\n"
code += " " + helper.var_idx("WarpShape", i) + ",\n"
code += " " + "ElementD,\n"
code += " " + "LayoutD,\n"
code += " " + "InstructionShape,\n"
code += " " + "OperatorClass,\n"
code += " " + "ArchTag,\n"
code += " " + "ThreadblockSwizzle,\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("Stages", i) + ",\n"
code += " " + "AlignmentA,\n"
code += " " + "AlignmentB,\n"
code += " " + "SplitKSerial,\n"
code += " " + "Operator,\n"
code += " " + "IsBetaZero_\n"
code += ">::B2bGemmKernel;\n\n"
return code
def gen_args(self):
def gen_arg_member(b2b_num):
data_members = []
for i in range(b2b_num):
member_type = "GemmCoord"
member_name = "problem_size_" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementA const, LayoutA>"
member_name = "ref_A0"
data_members.append((member_type, member_name))
for i in range(b2b_num):
member_type = "TensorRef<ElementB" + str(i) + " const, LayoutB" + str(i) +">"
member_name = "ref_B" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementC" + str(i) + " const, LayoutC" + str(i) +">"
member_name = "ref_C" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementD, LayoutD>"
member_name = helper.var_idx("ref_D", b2b_num - 1)
data_members.append((member_type, member_name))
for i in range(b2b_num):
member_type = "typename EpilogueOutputOp" + str(i) + "::Params"
member_name = "epilogue" + str(i)
data_members.append((member_type, member_name))
data_members.append(('int', 'batch_count'))
return data_members
def gen_arg_struct_default_ctor(struct_name, data_members, inital_param_num, inital_value):
constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \
gen_ir.indentation + struct_name + " (): "
for i in range(inital_param_num):
final_param = ','
if i == inital_param_num - 1:
final_param = '{ }'
constructs_code += data_members[i][1] + inital_value + final_param
constructs_code += "\n"
return constructs_code
def gen_arg_struct_ctor(struct_name, data_members):
constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \
gen_ir.indentation + struct_name + " (\n"
cnt = 0
param_num = len(data_members)
for param in data_members:
final = ',\n'
if cnt == param_num - 1:
final = '\n):\n'
constructs_code += gen_ir.indentation + param[0] + " " + param[1] + "_" + final
cnt += 1
cnt = 0
for param in data_members:
final = '),\n'
if cnt == param_num - 1:
final = ") { }\n"
constructs_code += gen_ir.indentation + param[1] + "(" + param[1] + "_" + final
cnt += 1
constructs_code += "\n"
return constructs_code
# (variable type, variable name)
struct_member = gen_arg_member(self.b2b_num)
self.arg_member = struct_member
codeBody = ""
for each_member in struct_member:
codeBody += gen_ir.indentation + each_member[0] + " " + each_member[1] + ";\n"
codeBody += gen_arg_struct_default_ctor("Arguments", struct_member, self.b2b_num, "(0,0,0)") + "\n"
codeBody += gen_arg_struct_ctor("Arguments", struct_member) + "\n"
struct_code = gen_ir.gen_struct("Arguments", codeBody)
return struct_code
def gen_func_constructs(self):
code = self.gen_class_name +"() {}"
return code
def gen_func_initialize(self):
code = "Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {\n" + \
"// Determine grid shape\n" + \
"ThreadblockSwizzle threadblock_swizzle;\n" + \
"cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(\n" + \
" args.problem_size_0, \n" + \
" { ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK },\n" + \
" args.batch_count);\n" + \
"// Initialize the Params structure\n" + \
"params_ = typename B2bGemmKernel::Params{\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.problem_size_", i) + ",\n"
code += " grid_shape,\n" + \
" args.ref_A0.non_const_ref(),\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.ref_B", i) + ".non_const_ref(),\n"
code += helper.var_idx(" args.ref_C", i) + ".non_const_ref(),\n"
code += helper.var_idx(" args.ref_D", self.b2b_num - 1) + ",\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.epilogue", i) + ",\n"
code += " args.batch_count\n"
code += "};\n" + \
"return Status::kSuccess;\n" + \
"}\n"
return code
def gen_func_run(self):
code = "Status run(cudaStream_t stream = nullptr) {\n" + \
"\n" + \
" ThreadblockSwizzle threadblock_swizzle;\n" + \
"\n" + \
" dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);\n" + \
" dim3 block(B2bGemmKernel::kThreadCount, 1, 1);\n" + \
"\n" + \
" cudaError_t result;\n" + \
"\n" + \
" int smem_size = int(sizeof(typename B2bGemmKernel::SharedStorage));\n" + \
" if (smem_size >= (48 << 10)) {\n" + \
" result = cudaFuncSetAttribute(Kernel<B2bGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);\n" + \
"\n" + \
" if (result != cudaSuccess) {\n" + \
" return Status::kErrorInternal;\n" + \
" }\n" + \
"\n" + \
" result = cudaFuncSetAttribute(\n" + \
" Kernel<B2bGemmKernel>,\n" + \
" cudaFuncAttributePreferredSharedMemoryCarveout, 100);\n" + \
"\n" + \
" if (result != cudaSuccess) {\n" + \
" return Status::kErrorInternal;\n" + \
" }\n" + \
" }\n" + \
" cutlass::Kernel<B2bGemmKernel><<<grid, block, smem_size, stream>>>(params_);\n" + \
" result = cudaGetLastError();\n" + \
" return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;\n" + \
" }\n"
return code
def gen_func_operator(self):
        operator_with_arg_code = "Status operator()(\n" + \
" Arguments const &args,\n" + \
" void *workspace = nullptr,\n" + \
" cudaStream_t stream = nullptr) {\n" + \
" Status status = initialize(args, workspace);\n" + \
" \n" + \
" if (status == Status::kSuccess) {\n" + \
" status = run(stream);\n" + \
" }\n" + \
" return status;\n" + \
"}\n"
operator_code = "Status operator()(\n" + \
" cudaStream_t stream = nullptr) {\n" + \
" Status status = run(stream);\n" + \
" return status;\n" + \
"}\n"
        return operator_with_arg_code + "\n" + operator_code
def gen_all_func(self):
return self.gen_using_kernel() + "\n" + \
self.gen_args() + "\n" + \
self.gen_func_constructs() + "\n" + \
self.gen_func_initialize() + "\n" + \
self.gen_func_run() + "\n" + \
self.gen_func_operator()
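# ------------------------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): driver scripts construct gen_device with the
# parsed fuse_gemm_info list and then call gen_code(sm_cap, mma_tp) to emit the device-level header.
# The values below are illustrative only.
def _example_emit_device_header(fuse_gemm_info, cutlass_root, out_dir):
    gen = gen_device(fuse_gemm_info, "FusedMultiGemmForward", [], cutlass_root, "", out_dir)
    gen.gen_code(75, 'hmma1688', ifprint=False)  # writes <out_dir>/device/FusedMultiGemmForward.h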
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
import gen_threadblock as gen_tb
class gen_default_Gemm:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_B2bMma(self, specialized_template_args):
code = "using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<\n"
code += specialized_template_args
code += ">::ThreadblockB2bMma;\n"
# print(code)
return code
def gen_epilogue(self):
epilogue_code = ""
epilogue_code += helper.var_idx("static const int kPartitionsK", self.b2b_num - 1) + helper.var_idx(" = ThreadblockShape", self.b2b_num - 1) + helper.var_idx("::kK / WarpShape", self.b2b_num - 1) + "::kK;\n"
epilogue_code += "using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<\n"
epilogue_code += " " + helper.var_idx("ThreadblockShape", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("typename B2bMma::Operator", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("kPartitionsK", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + "::kCount\n"
epilogue_code += ">::Epilogue;\n"
epilogue_code += "using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;\n\n"
return epilogue_code
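    # gen_epilogue emits the epilogue 'using' block for the last GEMM in the
    # chain: kPartitionsK = ThreadblockShape::kK / WarpShape::kK (for example,
    # a hypothetical 32 / 32 configuration gives a single partition), the
    # DefaultEpilogueTensorOp instantiation, and the final B2bGemmKernel alias.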
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/layout/matrix.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/epilogue.h\"
#include \"{cutlass_dir}cutlass/epilogue/thread/linear_combination.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/kernel/gemm_pipelined.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_simt.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_simt.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"../kernel/b2b_gemm.h\"
#include \"../threadblock/default_b2b_mma.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
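    # gen_code first emits the primary (unspecialized) DefaultB2bGemm template,
    # then a partial specialization pinned to Stages = 2, OperatorClass =
    # arch::OpClassTensorOp, ArchTag = arch::Sm75 and row-major C layouts; the
    # B2bMma and epilogue definitions above live inside that specialization.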
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, self.template_param,"", speicalized = None, set_default=False)
filter_list = []
filter_list.append(('Stages', 2))
filter_list.append(("OperatorClass", "arch::OpClassTensorOp"))
filter_list.append(("ArchTag", "arch::Sm75"))
for i in range(self.b2b_num):
filter_list.append((helper.var_idx("LayoutC", i), "layout::RowMajor"))
        rtn_template_args, specialized_template_args = gen_ir.filtered_param(self.template_param, filter_list, keep_= True)
        B2bMma_code = self.gen_B2bMma(specialized_template_args)
        epilogue_and_rest_code = self.gen_epilogue()
        # The keyword spelling 'speicalized' matches gen_ir.gen_template_struct's signature.
        gen_special_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, rtn_template_args, B2bMma_code + epilogue_and_rest_code, speicalized = specialized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_Kernel:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2bnum = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"\n'''.format(cutlass_dir=self.cutlass_deps_root)
return code
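    # gen_Params emits the device-side Params struct: per-GEMM problem sizes,
    # the tiled grid shape, iterator params/refs for A0 and every B_i, the C_i
    # refs (routed through FusedAddBiasEpilogue for all but the last GEMM), the
    # final D ref, per-GEMM output-op params, the batch count and the
    # precomputed gemm_k_iterations_0.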
def gen_Params(self):
gen_param = ""
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + ";\n"
gen_param += " " + "cutlass::gemm::GemmCoord grid_tiled_shape;\n"
gen_param += " " + "typename B2bMma::IteratorA0::Params params_A0;\n"
gen_param += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0;\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::Params params_B", i) + ";\n"
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ";\n"
if i == self.b2bnum - 1:
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ";\n"
else:
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_D", self.b2bnum - 1) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ";\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + ";\n"
gen_param += " " + 'int batch_count' + ";\n"
gen_param += " " + 'int gemm_k_iterations_0' + ";\n"
return gen_param
def gen_Memberfunc(self):
code_default = "\nCUTLASS_HOST_DEVICE\n"
code_default += "Params()"
code_default += " { } \n\n"
code_construct = "\nCUTLASS_HOST_DEVICE\n"
code_construct += "Params(\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("cutlass::gemm::GemmCoord const & problem_size_", i) + ",\n"
code_construct += " " + "cutlass::gemm::GemmCoord const & grid_tiled_shape,\n"
code_construct += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0,\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ",\n"
if i == self.b2bnum - 1:
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ",\n"
else:
code_construct += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ",\n"
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ",\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + helper.var_idx(" = typename OutputOp", i) + "::Params(),\n"
code_construct += " " + "int batch_count = 1\n"
code_construct += "):\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("problem_size_", i) + helper.var_idx("(problem_size_", i) + "),\n"
code_construct += " " + "grid_tiled_shape(grid_tiled_shape),\n"
code_construct += " " + "params_A0(ref_A0.layout()),\n"
code_construct += " " + "ref_A0(ref_A0),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("params_B", i) + helper.var_idx("(ref_B", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_B", i) + helper.var_idx("(ref_B", i) + "),\n"
code_construct += " " + helper.var_idx("params_C", i) + helper.var_idx("(ref_C", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_C", i) + helper.var_idx("(ref_C", i) + "),\n"
code_construct += " " + helper.var_idx("params_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + "),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("output_op_", i) + helper.var_idx("(output_op_", i) + "), \n"
code_construct += " " + "batch_count(batch_count) {\n"
code_construct += " " + helper.var_idx("gemm_k_iterations_", 0) + helper.var_idx(" = (problem_size_", 0) + helper.var_idx(".k() + B2bMma::Shape", 0) + helper.var_idx("::kK - 1) / B2bMma::Shape", 0) + "::kK;\n"
code_construct += "}\n"
return code_default + code_construct
def gen_using(self):
code_using = ""
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using OutputOp", i) + helper.var_idx(" = typename B2bMma::OutputOp", i) + ";\n"
code_using += " " + helper.var_idx("using OutputOp", self.b2bnum - 1) + " = typename Epilogue::OutputOp;\n"
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using FusedAddBiasEpilogue", i) + helper.var_idx(" = typename B2bMma::FusedAddBiasEpilogue", i) +";\n"
code_using += " " + "using WarpCount0 = typename B2bMma::WarpCount0;\n"
code_using += " " + "static int const kThreadCount = 32 * WarpCount0::kCount;\n"
code_using += gen_ir.gen_struct("Params", self.gen_Params() + self.gen_Memberfunc())
code_using += "union SharedStorage {\n"
code_using += " " + "typename B2bMma::B2bMmaSharedStorage main_loop;\n"
code_using += " " + "typename Epilogue::SharedStorage epilogue;\n"
code_using += "};\n"
return code_using
def gen_can_implement(self):
gen_code = ""
return gen_code
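    # gen_operator_and_constr emits the kernel's operator(): map the CTA to a
    # tile via the swizzle, early-exit out-of-range CTAs, build the A0/B_i/C_i
    # iterators with per-batch pointer offsets, run the fused B2bMma mainloop
    # (which applies the intermediate bias epilogues in registers), and finally
    # stream the last accumulators through the regular epilogue into D.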
def gen_operator_and_constr(self):
ctr_code = "CUTLASS_HOST_DEVICE\n"
ctr_code += self.gen_class_name + "() { } \n\n"
operator_code = "CUTLASS_DEVICE\n"
operator_code += "void operator()(Params const ¶ms, SharedStorage &shared_storage) {\n"
operator_code += " " + "ThreadblockSwizzle threadblock_swizzle;\n"
operator_code += " " + "cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + "int batch_idx = threadblock_tile_offset.k();\n"
operator_code += " " + "if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||\n"
operator_code += " " + "params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {\n"
operator_code += " " + " " + "return;\n"
operator_code += " " + "}\n"
operator_code += " " + "cutlass::MatrixCoord tb_offset_A0{\n"
operator_code += " " + " " + "threadblock_tile_offset.m() * B2bMma::Shape0::kM,\n"
operator_code += " " + " " + "0\n"
operator_code += " " + "};\n"
for i in range(self.b2bnum):
operator_code += " " + helper.var_idx("cutlass::MatrixCoord tb_offset_B", i) + "{\n"
operator_code += " " + " " + "0,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", i) + "::kN\n"
operator_code += " " + "};\n"
operator_code += " " + "int thread_idx = threadIdx.x;\n\n"
operator_code += " " + "MatrixCoord threadblock_offset(\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.m() * B2bMma::Shape", self.b2bnum - 1) + "::kM,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", self.b2bnum - 1) + "::kN\n"
operator_code += " " + ");\n"
operator_code += " " + "typename B2bMma::IteratorA0 iterator_A0(\n"
operator_code += " " + " " + "params.params_A0,\n"
operator_code += " " + " " + "params.ref_A0.data(),\n"
operator_code += " " + " " + "params.problem_size_0.mk(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "tb_offset_A0);\n"
operator_code += " " + "iterator_A0.add_pointer_offset(batch_idx * params.problem_size_0.m() * params.problem_size_0.k());\n\n"
for i in range (self.b2bnum):
operator_code += " " + helper.var_idx("typename B2bMma::IteratorB", i ) + helper.var_idx(" iterator_B", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_B", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_B", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", i) + ".kn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + helper.var_idx("tb_offset_B", i) + ");\n"
operator_code += " " + helper.var_idx("iterator_B", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * params.problem_size_", i) + ".k());\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("typename FusedAddBiasEpilogue", i ) + helper.var_idx("::OutputTileIterator iterator_C", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_" , i) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset" + ");\n"
operator_code += " " + helper.var_idx("int ref_C", i) + helper.var_idx("_stride = params.ref_C", i) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * (ref_C", i) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", i) + ".m()));\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("FusedAddBiasEpilogue", i ) + helper.var_idx(" epilogue_", i ) + ";\n"
operator_code += " " + "int warp_idx = __shfl_sync(0x1f, threadIdx.x / 32, 0);\n"
operator_code += " " + "int lane_idx = threadIdx.x % 32;\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("OutputOp", i) + helper.var_idx(" output_op_", i) + helper.var_idx("(params.output_op_", i) + ");\n"
operator_code += " " + "B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);\n"
operator_code += " " + "typename B2bMma::FragmentC0 src_accum;\n"
operator_code += " " + helper.var_idx("typename B2bMma::FragmentC", self.b2bnum - 1)+ " accumulators;\n"
operator_code += " " + "src_accum.clear();\n"
operator_code += " " + "accumulators.clear();\n"
operator_code += " " + "b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, "
for i in range(self.b2bnum):
operator_code += helper.var_idx("iterator_B", i) + ", "
operator_code += "src_accum"
if self.b2bnum != 1:
operator_code += ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("output_op_", i) + ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("epilogue_", i) + ", "
for i in range(self.b2bnum - 1):
final = ", "
if i == self.b2bnum - 2:
final =""
operator_code += helper.var_idx("iterator_C", i) + final
operator_code += ");\n"
operator_code += " " + helper.var_idx("OutputOp", self.b2bnum - 1) + helper.var_idx(" output_op_", self.b2bnum - 1) + helper.var_idx("(params.output_op_", self.b2bnum - 1) + ");\n"
operator_code += " " + "threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_C", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("int ref_C", self.b2bnum - 1) + helper.var_idx("_stride = params.ref_C", self.b2bnum - 1) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * (ref_C", self.b2bnum - 1) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", self.b2bnum - 1) + ".m()));\n\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_D", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_D", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_D", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("iterator_D", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * params.problem_size_", self.b2bnum - 1) + ".m());\n\n"
operator_code += " " + "Epilogue epilogue(\n"
operator_code += " " + " " + "shared_storage.epilogue,\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "warp_idx,\n"
operator_code += " " + " " + "lane_idx\n"
operator_code += " " + ");\n"
operator_code += " " + "epilogue("
operator_code += helper.var_idx("output_op_", self.b2bnum - 1) + ", "
operator_code += helper.var_idx("iterator_D", self.b2bnum - 1) + ", "
operator_code += "accumulators, "
operator_code += helper.var_idx("iterator_C", self.b2bnum - 1) + ");\n"
operator_code += "}\n"
return ctr_code + operator_code
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"
#include \"{cutlass_dir}cutlass/semaphore.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
template_param = []
template_param.append(("typename", "B2bMma"))
template_param.append(("typename", "Epilogue"))
template_param.append(("typename", "ThreadblockSwizzle"))
template_param.append((bool, "SplitKSerial"))
code_body = ""
code_body += self.gen_using()
code_body += self.gen_operator_and_constr()
struct_code = gen_ir.gen_template_struct(self.gen_class_name, template_param, code_body)
        code = self.gen_include_header()
        code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", struct_code)))
        return code
class gen_kernel:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.template_param = template_param
self.gen_class_name = "B2bGemm"
self.gen_kernel_name = gen_class_name + "Kernel"
        self.template_args = []
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_default_b2b_gemm = gen_default_Gemm(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
        self.gen_Kernel = gen_Kernel(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
# Include gen_threadBlock
self.gen_threadBlock = gen_tb.gen_threadblock(template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root)
self.file_dir = output_dir + "/kernel/"
def gen_code(self, first_use_1stage):
default_b2b_gemm = self.gen_default_b2b_gemm.gen_code()
print("[INFO]: Gen kernel code [default_b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_gemm.h", "w+") as f:
f.write(default_b2b_gemm)
        kernel = self.gen_Kernel.gen_code()
print("[INFO]: Gen kernel code [b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_gemm.h", "w+") as f:
f.write(kernel)
# Call code to gen threadblock
self.gen_threadBlock.gen_code(first_use_1stage)
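
# Minimal usage sketch (not part of the generator): the real driver passes the
# parsed template-parameter list and an existing output directory tree; the
# values below are placeholders for illustration only.
if __name__ == "__main__":
    demo = gen_kernel(template_param=[], gen_class_name="B2bGemm", b2b_num=2,
                      output_dir=".", cutlass_deps_root="", project_root=".")
    # Construction only stores configuration; printing one of the emitted
    # include headers shows the kind of C++ text the generator produces.
    print(demo.gen_default_b2b_gemm.gen_include_header())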
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
class gen_default_b2b_mma:
def __init__(self, template_param, gen_class_name, b2b_num,cutlass_deps_root, project_root):
self.gen_class_name = "DefaultB2bMma"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/arch/arch.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"../threadblock/b2b_mma_pipelined.h\"
#include \"../../fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h\"
#include \"../../fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h\"
#include \"../../fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
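    # gen_using_MmaCore emits one DefaultMmaCore alias per GEMM in the chain,
    # pairing each ThreadblockShape_i / WarpShape_i with the shared
    # InstructionShape, the element types and layouts, and the requested stage
    # count.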
def gen_using_MmaCore(self, stage):
threadBlockShape = "ThreadblockShape"
warpShape = "WarpShape"
instrunctionShape = "InstructionShape"
Mma_typename = "typename cutlass::gemm::threadblock::DefaultMmaCore"
gen_code = ""
for i in range(self.b2b_num):
code_using = "using MmaCore" + str(i)
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(Mma_typename, \
                helper.var_idx(threadBlockShape, i), helper.var_idx(warpShape, i), instructionShape, \
"ElementA", "LayoutA", \
helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), \
helper.var_idx("ElementAccumulator", i), "layout::RowMajor", \
"OperatorClass", str(stage), "Operator")
return gen_code
def gen_using_FusedAddBiasEpilogue(self):
gen_code = ""
for i in range(self.b2b_num - 1):
code_using = helper.var_idx("using FusedAddBiasEpilogue", i)
epilogue_name = "typename cutlass::epilogue::threadblock::DefaultFusedBiasActEpilogueTensorOp"
template_args = helper.var_idx("<ThreadblockShape", i) + helper.var_idx(",typename MmaCore", i) + helper.var_idx("::MmaPolicy::Operator, 1, EpilogueOutputOp", i) + ", 2>::Epilogue"
gen_code += code_using + " = " + epilogue_name + template_args + ";\n"
return gen_code
def gen_using_Iterator(self):
code_using = "using IteratorA0"
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore0"
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kM, " + MmaCore + "::Shape::kK>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapA"
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, "ElementA", "LayoutA", "1", iterator_map, "AlignmentA_")
for i in range(self.b2b_num):
code_using = "using IteratorB" + str(i)
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore" + str(i)
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kK, " + MmaCore + "::Shape::kN>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapB"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), "0", iterator_map, "AlignmentB_")
return gen_code
def gen_fragment_iterator(self):
gen_code = "using AccumulatorLayout = cutlass::layout::ColumnMajor;\n"
for i in range(1, self.b2b_num):
code_using = "using FragmentIteratorA" + str(i)
iterator_typename = "cutlass::gemm::warp::MmaTensorOpPureFragmentIterator"
curr_MmaCore = "MmaCore" + str(i)
prev_MmaCore = "MmaCore" + str(i - 1)
Matrix_shape_curr = "cutlass::MatrixShape<" + curr_MmaCore + "::WarpShape::kM, " + curr_MmaCore + "::InstructionShape::kK>"
Matrix_shape_prev = "cutlass::MatrixShape<" + prev_MmaCore + "::WarpShape::kM, " + prev_MmaCore + "::WarpShape::kN>"
Curr_shape_kK = curr_MmaCore + "::Shape::kK"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
Matrix_shape_curr, Matrix_shape_prev, Curr_shape_kK, \
helper.var_idx("ElementAccumulator", i-1), "ElementA", \
"AccumulatorLayout", "InstructionShape_", "true")
return gen_code
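    # gen_threadblockmma assembles the B2bMmaPipelined instantiation in the
    # order the pipelined template expects: GEMM-0's shape plus its global and
    # shared-memory iterators, then (Shape, FragmentIteratorA, IteratorB,
    # SmemIteratorB) for every later GEMM, the accumulator element and layout,
    # the per-fused-stage EpilogueOutputOp and FusedAddBiasEpilogue types, and
    # finally the per-GEMM MmaPolicy and Stages values.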
def gen_threadblockmma(self):
code_using = "using ThreadblockB2bMma"
iterator_typename = "cutlass::gemm::threadblock::B2bMmaPipelined"
MmaPipelined_param_Mma0_shape = "typename MmaCore0::Shape"
MmaPipelined_param_Mma0_iteratorA = "IteratorA0"
MmaPipelined_param_Mma0_smemIteratorA = "typename MmaCore0::SmemIteratorA"
MmaPipelined_param_Mma0_iteratorB = "IteratorB0"
MmaPipelined_param_Mma0_smemIteratorB = "typename MmaCore0::SmemIteratorB"
MmaPipelined_param_list = MmaPipelined_param_Mma0_shape + ", " + MmaPipelined_param_Mma0_iteratorA + ", " + MmaPipelined_param_Mma0_smemIteratorA + ", " + MmaPipelined_param_Mma0_iteratorB + ", " + MmaPipelined_param_Mma0_smemIteratorB + ", "
for i in range(1, self.b2b_num):
MmaPipelined_param_Mma_shape = "typename MmaCore" + str(i) + "::Shape"
MmaPipelined_param_Mma_iteratorA = "FragmentIteratorA" + str(i)
MmaPipelined_param_Mma_iteratorB = "IteratorB" + str(i)
MmaPipelined_param_Mma_smemIteratorB = "typename MmaCore" + str(i) + "::SmemIteratorB"
MmaPipelined_param_list += MmaPipelined_param_Mma_shape + ", " + MmaPipelined_param_Mma_iteratorA + ", " + MmaPipelined_param_Mma_iteratorB + ", " + MmaPipelined_param_Mma_smemIteratorB + ", "
MmaPipelined_param_list += "ElementAccumulator0, layout::RowMajor, "
for i in range(self.b2b_num - 1):
epilogue_name = "EpilogueOutputOp" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num - 1):
epilogue_name = "FusedAddBiasEpilogue" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num):
MmaPolicy = "typename MmaCore" + str(i) + "::MmaPolicy"
MmaPipelined_param_list += MmaPolicy + ", "
cnt = 0
for i in range(self.b2b_num):
MmaStage = helper.var_idx("Stages", i)
final = ", "
if cnt == self.b2b_num - 1:
final = ""
MmaPipelined_param_list += MmaStage + final
cnt += 1
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, MmaPipelined_param_list)
return gen_code
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct(self.gen_class_name, self.template_param, "", speicalized = None, set_default=False)
# Generate specialized template struct
mmacore_codebody = self.gen_using_MmaCore(2)
iterator_codebody = self.gen_using_Iterator()
fragment_iterator_codebody = self.gen_fragment_iterator()
epilogue_iterator_codebody = self.gen_using_FusedAddBiasEpilogue()
threadBlockMma = self.gen_threadblockmma()
specialized_code = mmacore_codebody + iterator_codebody + fragment_iterator_codebody + epilogue_iterator_codebody + threadBlockMma
# Specialize layout C -> cutlass::layout::RowMajor
        rtn_template_args, specialized_template_args = gen_ir.filtered_param(self.template_param, [ ('LayoutD', "cutlass::layout::RowMajor")], keep_= True)
        gen_special_code = gen_ir.gen_template_struct(self.gen_class_name, rtn_template_args, specialized_code, speicalized = specialized_template_args, set_default=False)
        code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_b2b_mma_pipelined:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bMmaPipelined"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/array.h\"
#include \"{cutlass_dir}cutlass/aligned_buffer.h\"
#include \"{cutlass_dir}cutlass/numeric_conversion.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/matrix_shape.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h\"
#include \"../threadblock/b2b_mma_base.h\"\n'''.format(cutlass_dir = self.cutlass_deps_root)
return code
def gen_using(self):
code_using = "using FragmentA0 = typename IteratorA0::Fragment;\n"
code_using += "using Base = B2bMmaBase<"
for i in range(self.b2b_num):
code_using += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Stage", i) + "_, "
code_using = code_using[: -2] + ">;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("using FragmentB", i) + helper.var_idx(" = typename IteratorB", i) + "::Fragment;\n"
code_using += helper.var_idx("using FragmentC", i) + helper.var_idx(" = typename Policy", i) + "::Operator::FragmentC;\n"
code_using += helper.var_idx("using Operator", i) + helper.var_idx(" = typename Policy", i) + "::Operator;\n"
for i in range(self.b2b_num - 1):
code_using += helper.var_idx("using IteratorC", i) + helper.var_idx(" = typename FusedAddBiasEpilogue", i) + "::OutputTileIterator;\n"
code_using += "using ArchTag = typename Policy0::Operator::ArchTag;\n"
code_using += "static ComplexTransform const kTransformA0 = Operator0::kTransformA;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("static ComplexTransform const kTransformB", i) + helper.var_idx(" = Operator", i) + "::kTransformB;\n"
code_using += "private:\n"
code_using += "using WarpFragmentA0 = typename Operator0::FragmentA;\n"
code_using += "using WarpFragmentB0 = typename Operator0::FragmentB;\n"
for i in range(1, self.b2b_num):
code_using += helper.var_idx("using WarpFragmentA", i) + helper.var_idx(" = typename FragmentIteratorA", i) + "::Fragment;\n"
code_using += helper.var_idx("using WarpFragmentB", i) + helper.var_idx(" = typename Operator", i) + "::FragmentB;\n"
code_using += "protected:\n"
code_using += "SmemIteratorA0 smem_iterator_A_;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("SmemIteratorB", i) + helper.var_idx(" smem_iterator_B", i) + "_;\n"
return code_using
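    # gen_operator emits the fused mainloop: the first GEMM is generated in
    # either single-stage or double-buffered two-stage form, and every later
    # GEMM is prefixed by its fused bias epilogue, which rewrites the previous
    # accumulator in registers before it is re-read through FragmentIteratorA.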
def gen_operator(self, first_use_1stage = False):
code = ""
def gen_operator_param(b2b_num):
param_code = ""
param_code += "int gemm_k_iterations_0,\n"
param_code += helper.var_idx("FragmentC", b2b_num-1) + helper.var_idx(" &accum", b2b_num-1) + ",\n"
param_code += "IteratorA0 iterator_A,\n"
for i in range(b2b_num):
param_code += helper.var_idx("IteratorB", i) + " " + helper.var_idx("iterator_B", i) + ",\n"
param_code += "FragmentC0 const &src_accum, \n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("OutputOp", i) + " " + helper.var_idx("output_op_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("FusedAddBiasEpilogue", i) + " " + helper.var_idx("epilogue_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("IteratorC", i) + " " + helper.var_idx("iterator_C", i) + ",\n"
param_code += "TransformA0 transform_A0 = TransformA0(), \n"
for i in range(b2b_num):
final = "(),\n"
if i == b2b_num - 1:
final = "()\n"
param_code += helper.var_idx("TransformB", i) + " " + helper.var_idx("transform_B", i) + " = " +helper.var_idx("TransformB", i) + final
return param_code
def gen_first_gemm_1stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
WarpFragmentA0 warp_frag_A0;\n\
WarpFragmentB0 warp_frag_B0;\n\
\n\
Operator0 warp_mma0;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
warp_mma0(accum0, warp_frag_A0, warp_frag_B0, accum0);\n\
}\n\
this->warp_tile_iterator_A0_.add_tile_offset({0, -Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset({-Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0});\n\
\n\
__syncthreads();\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
if(gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n"
return accu_code + code
def gen_first_gemm_2stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
++this->smem_iterator_A_;\n\
++this->smem_iterator_B0_;\n\
\n\
__syncthreads();\n\
\n\
// Pair of fragments used to overlap shared memory loads and math instructions\n\
WarpFragmentA0 warp_frag_A0[2];\n\
WarpFragmentB0 warp_frag_B0[2];\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
Operator0 warp_mma0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {\n\
\n\
// Write fragments to shared memory\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
++this->smem_iterator_B0_;\n\
++this->smem_iterator_A_;\n\
\n\
\n\
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory\n\
if (smem_write_stage_idx == 1) {\n\
this->smem_iterator_A_.add_tile_offset({0, -Base::Stage0});\n\
this->smem_iterator_B0_.add_tile_offset({-Base::Stage0, 0});\n\
}\n\
else {\n\
this->warp_tile_iterator_A0_.add_tile_offset(\n\
{0, -Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset(\n\
{-Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0,\n\
0});\n\
}\n\
\n\
smem_write_stage_idx ^= 1;\n\
}\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
if (warp_mma_k == 0) {\n\
\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
// Avoid reading out of bounds if this was the last loop iteration\n\
if (gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n\
\n\
warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0);\n\
}\n\
}\n"
return accu_code + code
def gen_other_gemms_2stage(b2b_num):
code = ""
            def gemm_template(id):
                code = "// " + str(id + 1) + " Gemm\n"
code += " /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile\n"
code += " " + helper.var_idx("FragmentC", id - 1) + helper.var_idx(" after_epilogue_accu", id - 1) + ";\n"
code += " " + helper.var_idx("epilogue_", id - 1) + helper.var_idx("(output_op_", id - 1) + helper.var_idx(", accum", id - 1) \
+ helper.var_idx(", after_epilogue_accu", id - 1) + helper.var_idx(", iterator_C", id - 1) +");\n"
# FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
code += " " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx(" warp_tile_iterator_A", id) +"_(" + helper.var_idx("after_epilogue_accu", id - 1) + ");\n"
# FragmentB1 tb_frag_B1;
code += " " + helper.var_idx("FragmentB", id) + " " + helper.var_idx("tb_frag_B", id) + ";\n"
# tb_frag_B1.clear();
code += " " + helper.var_idx("tb_frag_B", id) + ".clear();\n"
# iterator_B1.load(tb_frag_B1);
code += " " + helper.var_idx("iterator_B", id) + ".load(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++iterator_B1;
code += " " + "++" + helper.var_idx("iterator_B", id) + ";\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + helper.var_idx("this->smem_iterator_B", id) + "_.store(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++this->smem_iterator_B1_;
code += " " + helper.var_idx("++this->smem_iterator_B", id) + "_;\n"
# __syncthreads();
code += " " + "__syncthreads();\n"
# WarpFragmentA1 warp_frag_A1[2];
code += " " + helper.var_idx("WarpFragmentA", id) + helper.var_idx(" warp_frag_A", id) + "[2];\n"
# WarpFragmentB1 warp_frag_B1[2];
code += " " + helper.var_idx("WarpFragmentB", id) + helper.var_idx(" warp_frag_B", id) + "[2];\n"
# this->warp_tile_iterator_B1_.set_kgroup_index(0);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.set_kgroup_index(0);\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[0], output_op_0);
code += " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[0]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[0]);\n"
# ++warp_tile_iterator_A1_;
code += " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# Operator1 warp_mma1;
code += " " + helper.var_idx("Operator", id) + " " + helper.var_idx("warp_mma", id) + ";\n"
# smem_write_stage_idx = 1;
code += " " + "smem_write_stage_idx = 1;\n"
# int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1;
code += " " + helper.var_idx("int gemm_k_iterations_", id) + " = " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx("::Policy::kIterations / Base::kWarpGemmIterations", id) +";\n"
# if (gemm_k_iterations_1 <= 1) {
# iterator_B1.clear_mask();
# }
code += " " + "if (" + helper.var_idx("gemm_k_iterations_", id) + " <= 1 ){\n" \
+ " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " +"}\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
code += " " + helper.var_idx("for (; gemm_k_iterations_", id) + helper.var_idx(" > 0; --gemm_k_iterations_", id) + ") {\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
code += " " + " " + helper.var_idx("for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations", id) + "; ++warp_mma_k) {\n"
# if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {
code += " " + " " + " " + helper.var_idx("if (warp_mma_k == Base::kWarpGemmIterations", id) + " - 1) {\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + " " + " " + " " + helper.var_idx(" this->smem_iterator_B", id) + helper.var_idx("_.store(tb_frag_B", id) + ");\n"
# __syncthreads();
code += " " + " " + " " + " " + "__syncthreads();\n"
# ++smem_iterator_B1_;
code += " " + " " + " " + " " + helper.var_idx(" ++smem_iterator_B", id) + "_;\n"
# if (smem_write_stage_idx == 1) {
# smem_iterator_B1_.add_tile_offset({-Base::Stage, 0});
# }
code += " " + " " + " " + " " + "if ( smem_write_stage_idx == 1 ) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("smem_iterator_B", id) + helper.var_idx("_.add_tile_offset({-Base::Stage", i) + ", 0});\n" \
+ " " + " " + " " + " " +"}\n"
# else {
# this->warp_tile_iterator_B1_.add_tile_offset(
# {-Base::Stage * Policy1::kPartitionsK *
# Base::kWarpGemmIterations1,
# 0});
# }
code += " " + " " + " " + " " + "else {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.add_tile_offset(\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("{-Base::Stage", id) + helper.var_idx(" * Policy", id) + "::kPartitionsK *\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("Base::kWarpGemmIterations", id) + ",\n" \
+ " " + " " + " " + " " + " " + "0});\n" \
+ " " + " " + " " + " " + "}\n"
# smem_write_stage_idx ^= 1;
# }
code += " " + " " + " " + " " + "smem_write_stage_idx ^= 1;\n" \
+ " " + " " + " " + "}\n"
# this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations", id) + ");\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2], output_op_0);
code += " " + " " + " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[(warp_mma_k + 1) % 2]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[(warp_mma_k + 1) % 2]);\n"
# ++warp_tile_iterator_A1_;
code += " " + " " + " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + " " + " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# if (warp_mma_k == 0) {
# iterator_B1.load(tb_frag_B1);
# ++iterator_B1;
# if (gemm_k_iterations_1 <= 2) {
# iterator_B1.clear_mask();
# }
# }
code += " " + " " + " " + " if (warp_mma_k == 0) {\n" \
+ " " + " " + " " + " " + helper.var_idx("iterator_B", id) + helper.var_idx(".load(tb_frag_B", id) + ");\n" \
+ " " + " " + " " + " " + helper.var_idx("++iterator_B", id) +";\n" \
+ " " + " " + " " + " " + helper.var_idx("if (gemm_k_iterations_", id) +" <= 2) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " + " " + " " + " " + "}\n" \
+ " " + " " + " " + "}\n"
# warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum);
# }
# }
code += " " + " " + " " + helper.var_idx("warp_mma", id) + helper.var_idx("(accum", id) + helper.var_idx(", warp_frag_A", id) + helper.var_idx("[warp_mma_k % 2], warp_frag_B", id) + helper.var_idx("[warp_mma_k % 2], accum", id) + ");\n" \
+ " " + " " + "}\n" \
+ " " + "}\n\n\n"
return code
for i in range (1, b2b_num):
clear_accu = ""
if i != b2b_num - 1:
clear_accu = " " + helper.var_idx("FragmentC", i) + helper.var_idx(" accum", i) +";\n"
clear_accu += " " + helper.var_idx("accum", i) +".clear();\n"
                code += clear_accu + gemm_template(i)
return code
operator_code = " CUTLASS_DEVICE\n\
void operator()(\n " + gen_operator_param(self.b2b_num) + ") {\n"
if first_use_1stage:
operator_code += gen_first_gemm_1stage(self.b2b_num)
else:
operator_code += gen_first_gemm_2stage(self.b2b_num)
operator_code += gen_other_gemms_2stage(self.b2b_num) + "}\n"
return operator_code
def gen_construct_func(self):
name = self.gen_class_name
func_code = "CUTLASS_DEVICE\n"
func_code += name + "(\n" \
+ " " + "typename Base::B2bMmaSharedStorage &shared_storage,\n" \
+ " " + "int thread_idx,\n" \
+ " " + "int warp_idx,\n" \
+ " " + "int lane_idx\n" \
+ "):\n"
func_code += " " + "Base(shared_storage, thread_idx, warp_idx, lane_idx),\n" \
+ " " + "smem_iterator_A_(shared_storage.sharedStorage0.operand_A_ref(), thread_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num - 1:
final = " {\n"
func_code += helper.var_idx("smem_iterator_B", i) + helper.var_idx("_(shared_storage.sharedStorage", i) +".operand_B_ref(), thread_idx)" + final
func_code += " " + "int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;\n"
func_code += " " + "int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("int tile_offset_k", i) + helper.var_idx(" = Base::kWarpGemmIterations", i) + " * warp_idx_k;\n"
func_code += " " + "this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k0});\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("this->warp_tile_iterator_B", i) + helper.var_idx("_.add_tile_offset({tile_offset_k", i) + ", warp_idx_n});\n"
func_code += "}\n"
return func_code
def gen_member_func(self, first_use_1stage):
code = "public:\n"
code += self.gen_operator(first_use_1stage)
code += self.gen_construct_func()
return code
def gen_code(self, first_use_1stage):
def gen_template_args(b2b_num):
template_param = []
template_param.append(("typename", "Shape0"))
template_param.append(("typename", "IteratorA0"))
template_param.append(("typename", "SmemIteratorA0"))
template_param.append(("typename", "IteratorB0"))
template_param.append(("typename", "SmemIteratorB0"))
for i in range(1, b2b_num):
template_param.append(("typename", helper.var_idx("Shape", i)))
template_param.append(("typename", helper.var_idx("FragmentIteratorA", i)))
template_param.append(("typename", helper.var_idx("IteratorB", i)))
template_param.append(("typename", helper.var_idx("SmemIteratorB", i)))
template_param.append(("typename", "ElementC"))
template_param.append(("typename", "LayoutC"))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("OutputOp", i)))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("FusedAddBiasEpilogue", i)))
for i in range(0, b2b_num):
template_param.append(("typename", helper.var_idx("Policy", i)))
for i in range(0, b2b_num):
template_param.append((int, helper.var_idx("Stage", i)))
template_param.append(("typename","TransformA0", "NumericArrayConverter<typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>"))
for i in range(0, b2b_num):
cvtr = helper.var_idx("NumericArrayConverter<typename SmemIteratorB", i) + helper.var_idx("_::Element, typename IteratorB", i) + helper.var_idx("_::Element, IteratorB", i) + "_::Fragment::kElements>"
template_param.append(("typename", helper.var_idx("TransformB", i), cvtr))
template_param.append(("typename", "Enable", "bool"))
return template_param
template_param = gen_template_args(self.b2b_num)
inheritance_code = "public B2bMmaBase<"
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num - 1):
inheritance_code += helper.var_idx("Stage", i) + "_, "
inheritance_code += helper.var_idx("Stage", self.b2b_num - 1) + "_"
inheritance_code += ">"
code_body = ""
using_code= self.gen_using()
func_code = self.gen_member_func(first_use_1stage)
code_body = using_code + func_code
class_code = gen_ir.gen_template_class(self.gen_class_name, template_param, code_body, inheritance_code = inheritance_code)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
# print(code)
return code
class gen_b2b_mma_base:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dirs}cutlass/aligned_buffer.h\"
#include \"{cutlass_dirs}cutlass/arch/memory.h\"
#include \"{cutlass_dirs}cutlass/array.h\"
#include \"{cutlass_dirs}cutlass/cutlass.h\"
#include \"{cutlass_dirs}cutlass/gemm/gemm.h\"
#include \"{cutlass_dirs}cutlass/matrix_shape.h\"
#include \"{cutlass_dirs}cutlass/numeric_types.h\"\n'''.format(cutlass_dirs=self.cutlass_deps_root)
return code
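    # gen_shared_storage emits a per-stage SharedStorage template holding the
    # padded A and B tile buffers, layout helpers and TensorRef accessors, plus
    # get_B_Shared_ptr(), whose pointer is later cached per intermediate GEMM
    # as C<i>_smm_ptr in the B2bMmaBase constructor.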
def gen_shared_storage(self):
code = \
" template< \n\
typename Shape_,\n\
typename Policy_,\n\
int ThisStage_\n\
>\n\
class SharedStorage {\n\
public:\n\
using Shape = Shape_;\n\
using Policy = Policy_;\n\
static int const ThisStage = ThisStage_;\n\
using Operator = typename Policy::Operator;\n\
\
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;\n\
\
/// Tensor reference to the B operand \n\
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;\n\
\n\
/// Shape of the A matrix operand in shared memory \n\
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,\n\
Shape::kK * ThisStage +\n\
Policy::SmemPaddingA::kColumn>;\n\
\n\
/// Shape of the B matrix operand in shared memory\n\
using ShapeB =\n\
MatrixShape<Shape::kK * ThisStage + Policy::SmemPaddingB::kRow,\n\
Shape::kN + Policy::SmemPaddingB::kColumn>;\n\
\n\
public:\n\
\n\
/// Buffer for A operand\n\
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;\n\
\n\
/// Buffer for B operand\n\
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;\n\
\n\
public:\n\
\n\
/// Returns a layout object for the A matrix\n\
CUTLASS_DEVICE\n\
static typename Operator::LayoutA LayoutA() {\n\
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});\n\
}\n\
\n\
/// Returns a layout object for the B matrix\n\
CUTLASS_HOST_DEVICE\n\
static typename Operator::LayoutB LayoutB() {\n\
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});\n\
}\n\
\n\
/// Returns a TensorRef to the A operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefA operand_A_ref() {\n\
return TensorRefA{operand_A.data(), LayoutA()};\n\
}\n\
\n\
/// Returns a TensorRef to the B operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefB operand_B_ref() {\n\
return TensorRefB{operand_B.data(), LayoutB()};\n\
}\n\
CUTLASS_HOST_DEVICE\n\
void * get_B_Shared_ptr() {\n\
return operand_B.data();\n\
}\n\
};\n"
return code
def gen_using_and_misc(self, b2b_num):
code_using = ""
for i in range(b2b_num):
code_using += "using Operator" +str(i) + " = typename Policy" + str(i) +"::Operator;\n"
for i in range(b2b_num):
code_using += "using WarpGemm" +str(i) + " = typename Policy" + str(i) +"::Operator::Shape;\n"
for i in range(b2b_num):
code_using += "using WarpCount" +str(i) + " = GemmShape<" + helper.var_idx("Shape", i) +"::kM / " + helper.var_idx("WarpGemm", i) +"::kM, "\
+ helper.var_idx("Shape", i) +"::kN / " + helper.var_idx("WarpGemm", i) +"::kN, "\
+ helper.var_idx("Shape", i) +"::kK / " + helper.var_idx("WarpGemm", i) +"::kK>;\n"
code_misc = ""
for i in range(b2b_num):
code_misc += "static int const " + helper.var_idx("kWarpGemmIterations", i) + " = (" + helper.var_idx("WarpGemm", i) + "::kK / " + helper.var_idx("Operator", i) +"::Policy::MmaShape::kK);\n"
code = code_using + code_misc + self.gen_shared_storage()
for i in range(b2b_num):
code += "using " + helper.var_idx("SharedStorage", i) + " = SharedStorage<" + helper.var_idx("Shape", i) + ", " + helper.var_idx("Policy", i) +", " + helper.var_idx("Stage", i) + ">;\n"
def gen_union_shared_storage(b2b_num):
code = ""
for i in range(b2b_num):
code += " " +helper.var_idx("SharedStorage", i) + " " + helper.var_idx("sharedStorage", i) +";\n"
return code
code += "union B2bMmaSharedStorage {\n" + gen_union_shared_storage(self.b2b_num) + "};\n"
for i in range(b2b_num - 1):
code += helper.var_idx("void * C", i) + "_smm_ptr;\n"
return code
def gen_protected(self):
code = "\nprotected:\n"
code += "typename Operator0::IteratorA warp_tile_iterator_A0_;\n"
for i in range(self.b2b_num):
code += "typename Operator" +str(i) + "::IteratorB" +" warp_tile_iterator_B" + str(i) + "_;\n"
return code
def gen_public_member(self):
code = "\npublic:\n"
code += "CUTLASS_DEVICE\n"
code += \
"B2bMmaBase(\n" + \
" B2bMmaSharedStorage & shared_storage,\n" + \
" int thread_idx,\n" + \
" int warp_idx,\n" + \
" int lane_idx\n" + \
"):\n" + \
" warp_tile_iterator_A0_(shared_storage.sharedStorage0.operand_A_ref(), lane_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num-1:
final = "\n"
iterator = " warp_tile_iterator_B" + str(i) + "_"
shared_storage = "shared_storage.sharedStorage" + str(i) + ".operand_B_ref()"
code += iterator + "(" + shared_storage + ", lane_idx)" + final
code += "{\n"
for i in range(self.b2b_num - 1):
code += helper.var_idx(" C", i) + helper.var_idx("_smm_ptr = shared_storage.sharedStorage", i) + ".get_B_Shared_ptr();\n"
code += "}\n"
return code
def gen_code(self):
        template_arg = []
        for i in range(self.b2b_num):
            template_arg.append(("typename", helper.var_idx("Shape", i)))
        for i in range(self.b2b_num):
            template_arg.append(("typename", helper.var_idx("Policy", i)))
        for i in range(self.b2b_num):
            template_arg.append((int, helper.var_idx("Stage", i)))
        code_body = self.gen_using_and_misc(self.b2b_num)
        code_body += self.gen_protected()
        code_body += self.gen_public_member()
        class_code = gen_ir.gen_template_class("B2bMmaBase", template_arg, code_body)
code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
return code
class gen_threadblock:
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.file_dir = output_dir + "/threadblock/"
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_b2b_mma_base = gen_b2b_mma_base(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_b2b_mma_pipelined = gen_b2b_mme_pipelined(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_default_b2b_mma = gen_default_b2b_mma(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
def gen_code(self, first_use_1stage):
base_code = self.gen_b2b_mma_base.gen_code()
print("[INFO]: Gen kernel code [b2b_mma_base.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_base.h", "w+") as f:
f.write(base_code)
pipeline_code = self.gen_b2b_mma_pipelined.gen_code(first_use_1stage = first_use_1stage)
print("[INFO]: Gen kernel code [b2b_mma_pipelined.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_pipelined.h", "w+") as f:
f.write(pipeline_code)
default_code = self.gen_default_b2b_mma.gen_code()
print("[INFO]: Gen kernel code [default_b2b_mma.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_mma.h", "w+") as f:
f.write(default_code)
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ast
fuse_gemm_info = [
{
'epilogue': {
'tp': 'LeakyRelu', #'CustomizedLeaky_RELU'
'bias': {'addbias': False, 'bias_tp': 'mat'},
'args': [('float', 'leaky_alpha', 1.3), ],
'func': '''
y = max(leaky_alpha * x, x)
y = y * x
'''
}
},
]
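# Note (illustrative, not part of the original config): the generators and the
# accessors in helper.py and gen_turing_and_volta.py also read per-layer keys
# such as 'mnk', 'A_tp'/'B_tp'/'C_tp'/'Acc_tp' and 'A_format'/'B_format'/'C_format'.
# A hypothetical, fuller entry could look like:
#
# {
#     'mnk': [64, 128, 64],
#     'A_tp': 'fp16', 'B_tp': 'fp16', 'C_tp': 'fp16', 'Acc_tp': 'fp32',
#     'A_format': 'Row', 'B_format': 'Col', 'C_format': 'Row',
#     'epilogue': {
#         'tp': 'LeakyRelu',
#         'bias': {'addbias': False, 'bias_tp': 'mat'},
#         'args': [('float', 'leaky_alpha', 1.3)],
#         'func': 'y = max(leaky_alpha * x, x)',
#     },
# },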
class AnalysisNodeVisitor(ast.NodeVisitor):
def visit_Import(self,node):
ast.NodeVisitor.generic_visit(self, node)
def visit_ImportFrom(self,node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Assign(self,node):
print('Node type: Assign and fields: ', node._fields)
# print('Node type: Assign and targets value: ', node.targets, node.value)
ast.NodeVisitor.generic_visit(self, node)
def visit_BinOp(self, node):
print('Node type: BinOp and fields: ', node._fields)
print('node op: ', type(node.op).__name__)
ast.NodeVisitor.generic_visit(self, node)
def visit_Expr(self, node):
print('Node type: Expr and fields: ', node._fields)
ast.NodeVisitor.generic_visit(self, node)
def visit_Num(self,node):
print('Node type: Num and fields: ', node._fields)
print('Node type: Num: ', node.n)
def visit_Name(self,node):
print('Node type: Name and fields: ', node._fields)
print('Node type: Name and fields: ', type(node.ctx).__name__, node.id)
ast.NodeVisitor.generic_visit(self, node)
def visit_Str(self, node):
print('Node type: Str and fields: ', node._fields)
class CodeVisitor(ast.NodeVisitor):
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
node.op = ast.Sub()
self.generic_visit(node)
def visit_Assign(self, node):
print('Assign %s' % node.value)
self.generic_visit(node)
def visit_Name(self, node):
print("Name:", node.id)
self.generic_visit(node)
def visit_FunctionDef(self, node):
print('Function Name:%s' % node.name)
self.generic_visit(node)
# Build a `print('calling func: ...')` call expression (Python 3 has no ast.Print).
func_log_stmt = ast.Expr(
value = ast.Call(
func = ast.Name(id = 'print', ctx = ast.Load(), lineno = 0, col_offset = 0),
args = [ast.Str(s = 'calling func: %s' % node.name, lineno = 0, col_offset = 0)],
keywords = [],
lineno = 0,
col_offset = 0,
),
lineno = 0,
col_offset = 0,
)
node.body.insert(0, func_log_stmt)
visitor = AnalysisNodeVisitor()
code = \
'''
a=max(leaky_alpha * x, x +1)
'''
visitor.visit(ast.parse(code))
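# Illustrative sketch (not part of the original script): CodeVisitor above
# rewrites ast.Add nodes to ast.Sub in place, so re-emitting the tree turns
# "x + 1" into "x - 1". Assumes Python 3.9+ for ast.unparse.
#
# tree = ast.parse(code)
# CodeVisitor().visit(tree)
# print(ast.unparse(tree))   # a = max(leaky_alpha * x, x - 1)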
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
class gen_build_sys:
def __init__(self, cutlass_deps_dir, output_dir = "../"):
self.output_dir = output_dir
self.cutlass_deps_dir = cutlass_deps_dir
def gen_top(self):
code = ""
code += '''\
# Auto Generated code - Do not edit.
cmake_minimum_required(VERSION 3.8)
project(CUTLASS_MULTI_GEMMS LANGUAGES CXX CUDA)
find_package(CUDAToolkit)
set(CUDA_PATH ${{CUDA_TOOLKIT_ROOT_DIR}})
set(CUTLASS_PATH \"{cutlass_deps_dir}/include\")
set(CUTLASS_UTIL_PATH \"{cutlass_deps_dir}/tools/util/include\")
list(APPEND CMAKE_MODULE_PATH ${{CUDAToolkit_LIBRARY_DIR}})
'''.format(cutlass_deps_dir=self.cutlass_deps_dir)
code += '''\
set(GPU_ARCHS \"\" CACHE STRING
\"List of GPU architectures (semicolon-separated) to be compiled for.\")
if(\"${GPU_ARCHS}\" STREQUAL \"\")
set(GPU_ARCHS \"70\")
endif()
foreach(arch ${GPU_ARCHS})
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -gencode arch=compute_${arch},code=sm_${arch}\")
if(SM STREQUAL 70 OR SM STREQUAL 75)
set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -DWMMA\")
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -DWMMA\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -DWMMA\")
endif()
endforeach()
set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS}\")
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -Wall\")
set(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CUDA_FLAGS_DEBUG \"${CMAKE_CUDA_FLAGS_DEBUG} -O0 -G -Xcompiler -Wall\")
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(CMAKE_CXX_STANDARD STREQUAL \"11\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-extended-lambda\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr\")
endif()
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler=-fno-strict-aliasing\")
set(COMMON_HEADER_DIRS
${PROJECT_SOURCE_DIR}
${CUDAToolkit_INCLUDE_DIRS}
)
set(COMMON_LIB_DIRS
${CUDAToolkit_LIBRARY_DIR}
)
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_PATH})
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_UTIL_PATH})
'''
code += '''\
include_directories(
${COMMON_HEADER_DIRS}
)
link_directories(
${COMMON_LIB_DIRS}
)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
add_definitions(-DGOOGLE_CUDA=1)
add_executable(sample
sample/sample.cu
one_api.cu
)
target_link_libraries(sample PRIVATE
-lcudart
-lnvToolsExt
${CMAKE_THREAD_LIBS_INIT}
)
if(NOT DEFINED LIB_INSTALL_PATH)
set(LIB_INSTALL_PATH ${CMAKE_CURRENT_BINARY_DIR})
endif()
'''
return code
def gen_code(self):
top_code = self.gen_top()
with open(self.output_dir + "CMakeLists.txt", "w") as f:
f.write(top_code)
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
import gen_turing_and_volta as gen_basic
class gen_verify:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.name = gen_class_name + "_verify"
self.b2b_num = len(fuse_gemm_info)
self.params = []
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.separate_cutlass = gen_basic.gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_params()
self.output_dir = output_dir
def gen_code(self):
code = ""
code += self.user_header_file
code += self.separate_cutlass.gen_using(False) #False -> Turing, True -> Volta
code_body = ""
for i in range(self.b2b_num):
code_body += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_body += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(Arguments_", i) + ", nullptr);\n"
code_body += self.separate_cutlass.gen_run()
code += ir.gen_func(self.name, self.params, code_body)
helper.write_2_headfile("cutlass_verify.h", self.output_dir, code)
def gen_params(self):
for i in range(self.b2b_num):
self.params.append(
(
helper.var_idx("typename Gemm", i)+ "::Arguments",
helper.var_idx("Arguments_", i)
)
)
def get_params(self, declaration = True):
code = ""
if declaration:
for param in self.params:
code += param[0] + " " + param[1] + ";\n"
return code
def gen_initialize(self):
initialize_code = self.separate_cutlass.gen_initialize()
code = ir.gen_func("initialize", [], initialize_code)
return code
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
def type_2_cutlass_type(input_type = "fp16"):
# float point type
if input_type == "fp32":
return "float"
if input_type == "bf16":
return "cutlass::bfloat16_t"
if input_type == "fp16":
return "cutlass::half_t"
# integer type
if(input_type == "int32"):
return "int32_t"
if(input_type == "int8"):
return "int8_t"
if input_type == 'Row':
return 'cutlass::layout::RowMajor'
if input_type == 'Col':
return 'cutlass::layout::ColumnMajor'
def cvt_2_cutlass_shape(gemm_shape):
# gemm shape
if len(gemm_shape) == 3:
val = "cutlass::gemm::GemmShape<" \
+ str(gemm_shape[0]) + ", " \
+ str(gemm_shape[1]) + ", " \
+ str(gemm_shape[2]) + ">"
return val
def write_2_headfile(filename, file_dir, string):
with open(file_dir + filename, 'w') as f:
f.write("/* Auto Generated code - Do not edit.*/\n\n\n#pragma once\n" + string)
def var_idx(variable, index):
return variable + str(index)
def list_2_string(input_list):
rtn_string = ""
cnt = 0
for element in input_list:
final = ", \n"
if cnt == len(input_list) - 1:
final = "\n"
cnt += 1
rtn_string += str(element) + final
return rtn_string
def get_epilogue_info(layer_info):
return layer_info['epilogue']
def get_epilogue_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['tp']
def get_epilogue_add_bias_or_not(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['addbias']
def get_epilogue_add_bias_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['bias_tp']
def get_epilogue_args(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['args']
def get_epilogue_bias_shape(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
if bias_tp == 'mat':
mn_shape[0] = 'M'
return mn_shape
elif bias_tp == 'vec':
mn_shape[0] = 1
return mn_shape
else:
assert(0)
def get_epilogue_bias_ldm(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
c_layout = layer_info['C_format'].lower()
if c_layout != 'row':
assert(0)
if bias_tp == 'mat':
return mn_shape[1]
elif bias_tp == 'vec':
return 0
else:
assert(0)
def get_epilogue_compute_tp(layer_info):
return layer_info['Acc_tp']
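# Illustrative usage (hypothetical layer_info dict, not part of this module):
#
# layer = {'mnk': [64, 128, 32], 'C_format': 'Row', 'Acc_tp': 'fp32',
#          'epilogue': {'tp': 'LeakyRelu',
#                       'bias': {'addbias': True, 'bias_tp': 'vec'},
#                       'args': [('float', 'leaky_alpha', 1.3)]}}
# get_epilogue_bias_shape(layer)  # -> [1, 128]  (vector bias broadcasts over M)
# get_epilogue_bias_ldm(layer)    # -> 0         (row-major C with a vector bias)
# var_idx("problem_size_", 2)     # -> "problem_size_2"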
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_turing_impl:
def __init__(self,fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.class_name = gen_class_name
self.gen_class_name = gen_class_name + "_turing_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_turing_unfused = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_using(self):
code_using = "using b2b_gemm = typename cutlass::gemm::device::" + self.class_name + "<cutlass::half_t>;"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code += code_this
code += "typename b2b_gemm::Arguments arguments{\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("problem_size_", i) + ",\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", 0) + "), " + helper.var_idx("problem_size_", 0) + ".k()},\n"
for i in range(self.b2b_num):
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
ldmC = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmC + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", self.b2b_num -1) + "), " + helper.var_idx("problem_size_", self.b2b_num - 1) + ".n()},\n"
for i in range(self.b2b_num):
code += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code += "},\n"
code += " " + "Batch};\n\n"
code += " " "b2b_gemm gemm_op;\n"
code += " " + "gemm_op.initialize(arguments);\n"
return code + "\n"
def gen_run(self):
code = " " + "gemm_op(stream);\n"
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
if self.b2b_num == 1:
code_body += self.gen_turing_unfused.gen_using(False) #False -> Turing, True -> Volta
code_body += self.gen_turing_unfused.gen_initialize()
code_body += self.gen_turing_unfused.gen_run()
else:
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("turing_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_volta_turing_fuse_act_impl:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name + "_volta_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def perf_tiling(self, layer_mnk):
mnk = layer_mnk[:]
block_tile = mnk[:]
block_tile[2] = 32 # force the K tile to be 32
# M tile gen
block_tile[0] = 32
# N tile gen
if mnk[1] > 128:
block_tile[1] = 256
elif mnk[1] > 64:
block_tile[1] = 128
elif mnk[1] > 32:
block_tile[1] = 64
else :
block_tile[1] = 32
warp_tile = block_tile[:]
if block_tile[1] == 256:
warp_tile[1] = 64
elif block_tile[1] == 128:
warp_tile[1] = 32
elif block_tile[1] == 64:
warp_tile[1] = 32
else :
warp_tile[1] = 32
warp_tile[0] = 32
return block_tile, warp_tile
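# Worked example (illustrative, not part of the original): for a layer with
# mnk = [256, 96, 64], perf_tiling returns block_tile = [32, 128, 32] and
# warp_tile = [32, 32, 32]; N = 96 falls in the (64, 128] bucket, and the
# 128-wide block tile is covered by 32-wide warp tiles.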
def process_epilogue(self, epilogue_tp, n, C_tp, Acc_tp):
epilogue_setted_type = epilogue_tp
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
n_mod_8 = n % 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
epilogue_str = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<" + C_tp + ", " + str(N_align_elements) + ", " + Acc_tp + ", " + Acc_tp + ">"
return epilogue_str
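# Worked example (illustrative, not part of the original): with
# epilogue_tp = 'LeakyRelu', n = 128, C_tp = 'cutlass::half_t' and
# Acc_tp = 'float', process_epilogue returns
# "cutlass::epilogue::thread::LinearCombinationLeakyRelu<cutlass::half_t, 8, float, float>"
# since n % 8 == 0 yields an alignment of 8 elements.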
def gen_using(self, volta = True):
code_using = ""
volta_arch = "cutlass::arch::Sm70"
volta_tc = "cutlass::gemm::GemmShape<8, 8, 4>"
turing_arch = "cutlass::arch::Sm75"
turing_tc = "cutlass::gemm::GemmShape<16, 8, 8>"
arch = ""
tc = ""
if volta:
arch = volta_arch
tc = volta_tc
else:
arch = turing_arch
tc = turing_tc
for i in range(self.b2b_num):
k = self.fuse_gemm_info[i]['mnk'][2]
k_mod_8 = k % 8
ab_ldm = 1
if k_mod_8 == 0:
ab_ldm = 8
elif k_mod_8 == 4:
ab_ldm = 4
elif k_mod_8 == 2 or k_mod_8 == 6:
ab_ldm = 2
block_tile, warp_tile = self.perf_tiling(self.fuse_gemm_info[i]['mnk'])
this_gemm_config = helper.var_idx("using Gemm", i) + " = cutlass::gemm::device::GemmBatched<\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + ",\n"
this_gemm_config += " " + "cutlass::arch::OpClassTensorOp,\n"
this_gemm_config += " " + arch + ",\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(block_tile[0]) + ", " + str(block_tile[1]) + ", " + str(block_tile[2]) + ">,\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(warp_tile[0]) + ", " + str(warp_tile[1]) + ", " + str(warp_tile[2]) + ">,\n"
this_gemm_config += " " + tc + ",\n"
this_gemm_config += " " + self.process_epilogue(helper.get_epilogue_tp(self.fuse_gemm_info[i]), self.fuse_gemm_info[i]['mnk'][1], helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']), helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp'])) + ",\n"
this_gemm_config += " " + "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,\n"
this_gemm_config += " " + "2,\n"
this_gemm_config += " " + str(ab_ldm) + ",\n"
this_gemm_config += " " + str(ab_ldm) + ">;\n"
code_using += this_gemm_config + "\n"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code_this += helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = k_str
ldmB = k_str
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
if self.fuse_gemm_info[i]['A_format'] == 'Col':
ldmA = "M"
if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", i) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("D", i - 1) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", i) + "), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code_this += " },\n"
code_this += " " + "Batch};\n"
code_this += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_this += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(arguments_", i) + ", nullptr);\n"
code += code_this + "\n"
return code + "\n"
def gen_run(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += " " + helper.var_idx("gemm_op_", i) + "(stream);\n"
code += code_this
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("volta_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_one_API:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_volta = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_turing = gen_turing_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_CUTLASS_irrelevant_API(self):
code = ""
code += "#include <cuda_runtime.h>\n"
code += "#include <assert.h>\n"
param_name = "Fused" + str(self.b2b_num) + "xGemm_"
for i in range(self.b2b_num):
param_name += str(self.fuse_gemm_info[i]['mnk'][1]) + "_"
param_name += "Params"
params = ""
params += " " + "int M;\n"
params += " " + "int K0;\n"
params += " " + "int Batch;\n"
params += " " + "const void* A0;\n"
for i in range(self.b2b_num):
params += " " + "const void* " + helper.var_idx("B", i) + ";\n"
params += " " + "const void* " + helper.var_idx("C", i) + ";\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
params += " " + arg_tp + " " + arg_name + ";\n"
params += " " + "void* " + helper.var_idx("D", i) + ";\n"
code += ir.gen_struct(param_name, params)
code += "using Param = " + param_name + ";\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream);\n"
return code
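# Illustrative output (assumed single-GEMM config with n = 128 and one float
# 'leaky_alpha' epilogue argument; not generated from the original source).
# The emitted header would look roughly like:
#
# struct Fused1xGemm_128_Params{
#   int M;
#   int K0;
#   int Batch;
#   const void* A0;
#   const void* B0;
#   const void* C0;
#   float Epilogue0_leaky_alpha;
#   void* D0;
# }; // struct Fused1xGemm_128_Params
# using Param = Fused1xGemm_128_Params;
# void one_api( const Param & param, int sm, cudaStream_t stream);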
def gen_one_api(self):
code = ""
code += "/* Auto Generated code - Do not edit.*/\n"
code += "#include \"cutlass_irrelevant.h\"\n"
code += "#include \"api.h\"\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream) {\n"
code += " " + "if (sm == 70) \n"
code += " " + " " + self.gen_class_name + "_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else if(sm >= 75) \n"
code += " " + " " + self.gen_class_name + "_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else assert(0);\n"
code += "}\n"
return code
def gen_code(self):
turing_code = self.gen_turing.gen_wrapper()
volta_code = self.gen_volta.gen_wrapper()
cutlass_irrelevant_code = self.gen_CUTLASS_irrelevant_API()
one_api_code = self.gen_one_api()
with open(self.output_dir + "one_api.cu", "w+") as f:
f.write(one_api_code)
helper.write_2_headfile("cutlass_irrelevant.h", self.output_dir, cutlass_irrelevant_code)
helper.write_2_headfile("api.h", self.output_dir, self.user_header_file + "\n" + turing_code + volta_code)
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_test:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = user_header_file
self.sample_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def gen_cpp_sample(self):
code = "/* Auto Generated code - Do not edit.*/\n"
code += "#include <stdio.h> \n"
code += "#include \"cutlass/gemm/device/gemm_batched.h\" \n"
code += "#include \"cutlass/cutlass.h\" \n"
code += "#include \"../cutlass_irrelevant.h\" \n"
code += "#include \"../cutlass_verify.h\" \n"
code += "#include \"leaky_bias.h\" \n"
code += "#include \"utils.h\" \n"
code += "int main(int args, char * argv[]) {\n"
code += " " + "int M = atoi(argv[1]);\n"
code += " " + "int K0 = " + str(self.fuse_gemm_info[0]['mnk'][0]) + ";\n"
code += " " + "if(args == 3);\n"
code += " " + " " + "K0 = atoi(argv[2]);\n"
code += " " + "int B = 1;\n"
code += " " + "if(args == 4);\n"
code += " " + " " + "B = atoi(argv[3]);\n"
code += " " + "srand(1234UL);\n"
code += " " + "int device_id = 0;\n"
code += " " + "cudaGetDevice(&device_id);\n"
code += " " + "cudaDeviceProp prop;\n"
code += " " + "cudaGetDeviceProperties(&prop, device_id);\n"
code += " " + "int sm = prop.major *10 + prop.minor;\n"
code += "using ElementCompute = cutlass::half_t;\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementCompute alpha", i) + " = ElementCompute(1);\n"
addbias = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
if addbias:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(1);\n"
else:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(0);\n"
code += " " + "size_t flops = 0;\n"
for i in range(self.b2b_num):
m = self.fuse_gemm_info[i]['mnk'][0]
n = self.fuse_gemm_info[i]['mnk'][1]
k = self.fuse_gemm_info[i]['mnk'][2]
bias_shape = helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])
this_k = "K0"
if (i > 0):
this_k = str(k)
code += " " + "flops += size_t(2) * size_t(M) * size_t(B) * " + "size_t(" + str(n) + ") * size_t(" + this_k + ");\n"
code += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(" + "M" + ", " + str(n) + ", " + this_k + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_A", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_B", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".n() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_C", i) + "(B * " + str(bias_shape[0]) + " * " + str(bias_shape[1]) + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D_cutlass_ref", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".n());\n"
code += " " + helper.var_idx("Mat_A", i) + ".init();\n"
code += " " + helper.var_idx("Mat_B", i) + ".init();\n"
code += " " + helper.var_idx("Mat_C", i) + ".init();\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D", self.b2b_num - 1) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_",self.b2b_num - 1) + ".n());\n"
params = []
params.append("M")
params.append("B")
params.append("Mat_A0.device_ptr")
for i in range(self.b2b_num):
params.append(helper.var_idx("Mat_B", i) + ".device_ptr")
params.append(helper.var_idx("Mat_C", i) + ".device_ptr")
if i != self.b2b_num-1:
params.append(helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr")
params.append(helper.var_idx("Mat_D", self.b2b_num - 1) + ".device_ptr")
code += " " + "Param arguments = {\n"
code += " " + " " + "M,\n"
code += " " + " " + "K0,\n"
code += " " + " " + "B,\n"
code += " " + " " + "reinterpret_cast<const void*>(Mat_A0.device_ptr),\n"
cnt = 1
for i in range(self.b2b_num):
bias_flag = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_B", i) + ".device_ptr" + "),\n"
cnt += 1
if bias_flag:
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_C", i) + ".device_ptr" + "),\n"
cnt += 1
else:
code += " " + " " + "reinterpret_cast<const void*>(NULL),\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_value = str(arg[2])
code += " " + " " + helper.type_2_cutlass_type(acc_tp) + "(" + arg_value + "),\n"
if i != self.b2b_num - 1:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr" + "),\n"
else:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D", i) + ".device_ptr" + ")};\n"
code += " " + "TI(FUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + "one_api(arguments, sm, NULL);\n"
code += " " + "}\n"
code += " " + "TO(FUSED_CUTLASS, \"FUSED_CUTLASS\", 100);\n"
code += "\n"
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += " " + helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmA = "K0"
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
if self.fuse_gemm_info[i]['A_format'] == 'Col':
ldmA = "M"
if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
if self.fuse_gemm_info[i]['C_format'] == 'Col':
ldmC = "M"
if i == 0:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_A", i) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i - 1) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("Mat_B", i) + ".device_ptr), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_C", i) + ".device_ptr), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_value = str(epilogue_arg[2])
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_value) + ")"
code_this += " " + " },\n"
code_this += " " + " " + "B};\n"
code += code_this
code += " " + "TI(UNFUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + self.gen_class_name + "_verify(\n"
for i in range(self.b2b_num):
code += " " + " " + " " + helper.var_idx("arguments_", i) + ",\n"
code += " " + " " + " " + "NULL);\n"
code += " " + "}\n"
code += " " + "TO(UNFUSED_CUTLASS, \"UNFUSED_CUTLASS\", 100);\n"
code += " " + helper.var_idx("Mat_D_cutlass_ref", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("Mat_D", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("check_result(Mat_D_cutlass_ref", self.b2b_num - 1) + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) \
+ helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) + ".elements);\n"
code += "\n\n}\n"
with open(self.sample_dir + "sample.cu", "w+") as f:
f.write(code)
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
class replace_fix_impl:
def __init__(self, src_dir, dst_dir, cutlass_deps_root):
self.src_dir = src_dir
self.dst_dir = dst_dir
self.cutlass_deps_root = cutlass_deps_root
def gen_code(self):
for sub_dir in os.walk(self.src_dir):
files_in_sub_dir = sub_dir[2]
src_dirs = sub_dir[0]
output_dirs = self.dst_dir + sub_dir[0][len(self.src_dir):]
if not os.path.exists(output_dirs):
os.mkdir(output_dirs)
for f in files_in_sub_dir:
with open(src_dirs +"/" + f, 'r') as current_file:
output_lines = []
lines = current_file.readlines()
for line in lines:
if(len(line) >= len("#include \"cutlass") and line[:len("#include \"cutlass")] == "#include \"cutlass"):
new_line = "#include \"" + self.cutlass_deps_root + line[len("#include \""):]
# print(new_line)
output_lines.append(new_line)
else:
output_lines.append(line)
with open(output_dirs + "/" + f, "w+") as dest_file:
dest_file.writelines(output_lines)
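# Illustrative usage (hypothetical paths, not part of the original): copy a
# source tree while re-rooting CUTLASS includes under a vendored checkout, so
# '#include "cutlass/gemm/device/gemm.h"' becomes
# '#include "../../cutlass/include/cutlass/gemm/device/gemm.h"'.
#
# replace_fix_impl("fixed_impls", "out/fixed_impls", "../../cutlass/include/").gen_code()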
|
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
indentation = " "
def append_word(word):
code = ""
code += word
code += " "
return code
def gen_namespace(namespace, codeBody):
code_gen = "namespace " + namespace + " {\n"
code_gen += codeBody
code_gen += "} // namespace " + namespace + "\n"
return code_gen
def gen_expression(type, lval, rval = None):
code_gen = ""
code_gen += append_word(type)
code_gen += append_word(lval)
if rval is not None:
code_gen += append_word("=")
code_gen += append_word(rval)
return code_gen
def gen_class(name, codeBody, inheritance_code = None):
code_gen = ""
if inheritance_code is None:
code_gen = "class " + name + "{\n"
else:
code_gen = "class " + name + " : "+ inheritance_code + "{\n"
code_gen += codeBody
code_gen += "}; // class " + name + "\n"
return code_gen
def gen_struct(name, codeBody, specialized = None):
specialized_code = ""
if specialized is not None:
specialized_code = "<" + specialized + ">"
code_gen = "struct " + name + specialized_code + "{\n"
code_gen += codeBody
code_gen += "}; // struct " + name + "\n"
return code_gen
def gen_template_arg(arg_type, arg_name, default_val = None):
rval = None
if default_val is not None:
rval = str(default_val)
arg_typename = ""
if arg_type is int:
arg_typename = "int"
elif arg_type is bool:
arg_typename = "bool"
else:
arg_typename = "typename"
internal_arg_name = arg_name + "_"
code_gen = indentation
code_gen += gen_expression(arg_typename, internal_arg_name, rval)
return code_gen
def gen_template_args(args, set_default = True):
arg_len = len(args)
cnt = 1
code_gen = ""
for arg_tuple in args:
arg_type = arg_tuple[0]
arg_name = arg_tuple[1]
arg_default_val = None
if len(arg_tuple) == 3 and set_default:
arg_default_val = arg_tuple[2]
code_gen += gen_template_arg(arg_type, arg_name, arg_default_val)
if cnt != arg_len:
code_gen += ",\n"
cnt += 1
return code_gen
def gen_template_head(args, set_default = True):
code_gen = "template <\n"
code_gen += gen_template_args(args, set_default)
code_gen += ">\n"
return code_gen
def export_template_args(args):
code_gen = "public:\n"
for arg_tuple in args:
code_gen += indentation
arg_type = arg_tuple[0]
arg_name = arg_tuple[1]
internal_arg_name = arg_name + "_"
typename = ""
if arg_type is int:
typename = "static int const"
elif arg_type is bool:
typename = "static bool const"
else:
typename = "using"
code_gen += gen_expression(typename, arg_name, internal_arg_name)
code_gen += ";\n"
return code_gen
def gen_template_class(class_name, args, codeBody, set_default = True, inheritance_code = None):
code_gen = ""
code_gen += gen_template_head(args, set_default)
code_gen += gen_class(class_name, export_template_args(args) + codeBody, inheritance_code)
return code_gen
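# Illustrative output (approximate, not part of this module):
#
# gen_template_class("Foo", [("typename", "Shape"), (int, "Stages")], "")
#
# emits roughly:
#
# template <
#   typename Shape_ ,
#   int Stages_ >
# class Foo{
# public:
#   using Shape = Shape_ ;
#   static int const Stages = Stages_ ;
# }; // class Foo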
def gen_template_struct(struct_name, args, codeBody, speicalized = None, set_default = True, export_args = True):
code_gen = ""
code_gen += gen_template_head(args, set_default)
code = export_template_args(args) + codeBody
if export_args is False:
code = codeBody
code_gen += gen_struct(struct_name, code , speicalized)
return code_gen
def gen_declare_template_struct(name, *params):
code = name + "<"
cnt = 0
param_num = len(params)
for param in params:
final = ", "
if cnt == param_num - 1:
final = ""
code += param + final
cnt += 1
code += ">;\n"
return code
def filtered_param(params, name_and_value_pair, keep_ = False):
rtn_template_args = []
specialized_template_args = []
for param in params:
param_name = ""
if len(param) > 1:
param_name = param[1]
else:
param_name = param[0]
hit_flag = False
set_value = ""
for n_v_pair in name_and_value_pair:
filter_name = n_v_pair[0]
set_value = n_v_pair[1]
if param_name == (filter_name + "_") or param_name == filter_name :
hit_flag = True
break
if hit_flag is False:
rtn_template_args.append(param)
if hit_flag is True:
specialized_template_args.append(set_value)
else:
if keep_ is True:
specialized_template_args.append(param_name + "_")
else:
specialized_template_args.append(param_name)
specialized_template_arg_str = helper.list_2_string(specialized_template_args)
return rtn_template_args, specialized_template_arg_str
def gen_func(func_name, arg_lists, code_body, only_declare = False, with_cudaStream = True):
code = "void " + func_name + "(\n"
for arg in arg_lists:
arg_tp = arg[0]
arg_nm = arg[1]
code += " " + arg_tp + " " + arg_nm + ",\n"
code += "cudaStream_t stream)"
if only_declare :
return code
code += "{\n"
code += code_body + "\n"
code += "}\n"
return code
def indent_level(code, level = 0):
rtn_code = ""
for i in range(level):
rtn_code += " "
rtn_code += code
return rtn_code
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a GEMM
"""
import argparse
import numpy as np
import sys
import cutlass
import pycutlass
from pycutlass import *
from pycutlass.utils.device import device_cc
parser = argparse.ArgumentParser(description="Launch a GEMM kernel from Python: 'D = alpha * A * B + beta * C'")
parser.add_argument("--m", default=128, type=int, help="M dimension of the GEMM")
parser.add_argument("--n", default=128, type=int, help="N dimension of the GEMM")
parser.add_argument("--k", default=128, type=int, help="K dimension of the GEMM")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python GEMM example requires compute capability greater than or equal to 70."
alignment = 8
assert args.m % alignment == 0, "M dimension of size {} is not divisible by alignment of {}".format(args.m, alignment)
assert args.n % alignment == 0, "N dimension of size {} is not divisible by alignment of {}".format(args.n, alignment)
assert args.k % alignment == 0, "K dimension of size {} is not divisible by alignment of {}".format(args.k, alignment)
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set the compiler to NVCC
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
A = TensorDescription(cutlass.float16, cutlass.ColumnMajor, alignment)
B = TensorDescription(cutlass.float16, cutlass.RowMajor, alignment)
C = TensorDescription(cutlass.float32, cutlass.ColumnMajor, alignment)
element_acc = cutlass.float32
element_epilogue = cutlass.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
instruction_shape = [16, 8, 16]
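# These correspond to the Tensor Core MMA shapes available per architecture for fp16
# inputs (m8n8k4 on compute capability 70, m16n8k8 on 75, m16n8k16 on 80 and newer).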
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
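# With a 128x128x32 threadblock tile and a 2x2x1 warp arrangement, each warp computes
# a 64x64 sub-tile of the output per 32-deep slice of K.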
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = GemmOperationUniversal(
arch=cc, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Randomly initialize tensors
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.k,))).astype(np.float16)
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.k * args.n,))).astype(np.float16)
tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.n,))).astype(np.float32)
tensor_D = np.zeros(shape=(args.m * args.n,)).astype(np.float32)
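# Taking the ceiling of values drawn from [-8.5, 7.5) produces small integers in
# [-8, 8], which are exactly representable in fp16/fp32, so the device result is
# expected to match the host reference (near-)exactly in the comparison below.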
problem_size = cutlass.gemm.GemmCoord(args.m, args.n, args.k)
alpha = 1.
beta = 0.
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=operation.epilogue_type(alpha, beta))
# Run the operation
operation.run(arguments)
arguments.sync()
# Run the host reference module and compare to the CUTLASS result
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a 2d convolution
"""
import argparse
import torch
import numpy as np
import sys
import cutlass
import pycutlass
from pycutlass import *
from pycutlass.utils.device import device_cc
parser = argparse.ArgumentParser(
description=("Launch a 2d convolution kernel from Python. "
"See https://docs.nvidia.com/deeplearning/performance/dl-performance-convolutional/index.html#convo-intro for notation."))
parser.add_argument("--n", default=1, type=int, help="N dimension of the convolution")
parser.add_argument("--c", default=64, type=int, help="C dimension of the convolution")
parser.add_argument("--h", default=32, type=int, help="H dimension of the convolution")
parser.add_argument("--w", default=32, type=int, help="W dimension of the convolution")
parser.add_argument("--k", default=32, type=int, help="N dimension of the convolution")
parser.add_argument("--r", default=3, type=int, help="R dimension of the convolution")
parser.add_argument("--s", default=3, type=int, help="S dimension of the convolution")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python Conv2d example requires compute capability greater than or equal to 70."
alignment = 1
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set NVCC as the compiler to use
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
A = TensorDescription(cutlass.float16, cutlass.TensorNHWC, alignment)
B = TensorDescription(cutlass.float16, cutlass.TensorNHWC, alignment)
C = TensorDescription(cutlass.float32, cutlass.TensorNHWC, alignment)
element_acc = cutlass.float32
element_epilogue = cutlass.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
instruction_shape = [16, 8, 16]
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop,
iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=cc, tile_description=tile_description,
A=A, B=B, C=C, stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Randomly initialize tensors
problem_size = cutlass.conv.Conv2dProblemSize(
    cutlass.Tensor4DCoord(args.n, args.h, args.w, args.c),
cutlass.Tensor4DCoord(args.k, args.r, args.s, args.c),
cutlass.Tensor4DCoord(0, 0, 0, 0), # Padding
cutlass.MatrixCoord(1, 1), # Strides
cutlass.MatrixCoord(1, 1), # Dilation
cutlass.conv.Mode.cross_correlation,
1, # Split k slices
1 # Groups
)
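# With zero padding, unit stride, and unit dilation (as configured above), the output
# spatial extents follow the usual formula P = (H + 2*pad_h - R) / stride_h + 1 and
# reduce to P = H - R + 1 and Q = W - S + 1.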
tensor_A_size = cutlass.conv.implicit_gemm_tensor_a_size(operation.conv_kind, problem_size)
tensor_B_size = cutlass.conv.implicit_gemm_tensor_b_size(operation.conv_kind, problem_size)
tensor_C_size = cutlass.conv.implicit_gemm_tensor_c_size(operation.conv_kind, problem_size)
tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5))
tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5))
tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5))
tensor_D = torch.ones(size=(tensor_C_size,), dtype=torch.float32, device="cuda")
alpha = 1.
beta = 0.
arguments = Conv2dArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=operation.epilogue_type(alpha, beta)
)
# Run the operation
operation.run(arguments)
arguments.sync()
# Run the host reference module and compare to the CUTLASS result
reference = Conv2dReferenceModule(A, B, C, operation.conv_kind)
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
try:
assert torch.equal(tensor_D, tensor_D_ref)
except:
assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2)
print("Passed.")
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a grouped GEMM
"""
import argparse
import numpy as np
import sys
import cutlass
import pycutlass
from pycutlass import *
from pycutlass.utils.device import device_cc
parser = argparse.ArgumentParser(description="Launch a grouped GEMM kernel from Python")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python grouped GEMM example requires compute capability greater than or equal to 70."
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set NVCC as the compiler to use
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
alignment = 1
A = TensorDescription(cutlass.float16, cutlass.ColumnMajor, alignment)
B = TensorDescription(cutlass.float16, cutlass.RowMajor, alignment)
C = TensorDescription(cutlass.float32, cutlass.ColumnMajor, alignment)
element_acc = cutlass.float32
element_epilogue = cutlass.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
instruction_shape = [16, 8, 16]
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = GemmOperationGrouped(
arch=cc, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor,
precompute_mode=SchedulerMode.Device)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Initialize tensors for each problem in the group
problem_sizes = [
cutlass.gemm.GemmCoord(128, 128, 64),
cutlass.gemm.GemmCoord(512, 256, 128)
]
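# A grouped GEMM runs several independent GEMMs, each with its own problem size and
# tensors, from a single kernel launch; the two shapes above are only example sizes.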
problem_count = len(problem_sizes)
alpha = 1.
beta = 0.
tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
tensor_D_refs = []
reference = ReferenceModule(A, B, C)
for problem_size in problem_sizes:
# Randomly initialize tensors
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * k,))).astype(np.float16)
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(k * n,))).astype(np.float16)
tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * n,))).astype(np.float32)
tensor_D = np.zeros(shape=(m * n,)).astype(np.float32)
tensor_As.append(tensor_A)
tensor_Bs.append(tensor_B)
tensor_Cs.append(tensor_C)
tensor_Ds.append(tensor_D)
# Run the reference GEMM
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
tensor_D_refs.append(tensor_D_ref)
arguments = GemmGroupedArguments(
operation, problem_sizes, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds,
output_op=operation.epilogue_type(alpha, beta)
)
# Run the operation
operation.run(arguments)
arguments.sync()
# Compare the CUTLASS result to the host reference result
for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs):
try:
assert np.array_equal(tensor_d, tensor_d_ref)
except:
assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5)
print("Passed.")
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import pycutlass
from pycutlass import *
from pycutlass.utils.device import device_cc
import cutlass
from bfloat16 import bfloat16
import sys
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS GEMM kernels from Python: 'D = alpha * A * B + beta * C'")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help="This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM")
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
parser.add_argument("-epv", "--epilogue_visitor", default=None,
type=str, choices=['RowReduction', 'ColumnReduction', 'RowBroadcast', 'ColumnBroadcast'], help="epilogue visitor for more complex epilogues")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle", "BatchedIdentitySwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# Argument
parser.add_argument("-p", "--problem_size",
default=[128, 128, 128], nargs=3, type=int,
help="GEMM problem size M, N, K")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float,
help="Scaling factor of A * B")
parser.add_argument("-beta", "--beta", default=0.0, type=float,
help="Scaling factor of C")
parser.add_argument("-gm", "--gemm_mode", default="Gemm", type=str,
choices=["Gemm", "GemmSplitKParallel", "Batched", "Array"],
help="GEMM mode. Gemm is used for non-splitK or serial-splitK. \
GemmSplitKParallel is used for parallel splitK")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
parser.add_argument('-batch', '--batch', default=1, type=int, help="batch size for batched GEMM")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the device's compute capability of {}.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
pycutlass.compiler.nvcc()
np.random.seed(0)
element_a = getattr(cutlass, args.element_a)
element_b = getattr(cutlass, args.element_b)
element_c = getattr(cutlass, args.element_c)
element_acc = getattr(cutlass, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass, args.layout_a)
layout_b = getattr(cutlass, args.layout_b)
layout_c = getattr(cutlass, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass, args.element_epilogue)
if (args.activation_function == "identity"
or (args.gemm_mode == "GemmSplitKParallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass, args.swizzling_functor)
visitor = args.epilogue_visitor is not None
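# Each EpilogueVisitTree subclass below expresses the epilogue as a small Python
# function: pycutlass lowers the __call__ body to the device-side epilogue, and any
# extra return value (the reduction or the broadcast intermediate T) becomes an
# additional kernel output. This is a summary of how the example uses the API, not a
# complete specification of EpilogueVisitTree.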
if args.epilogue_visitor == "ColumnReduction":
class ColumnReduction_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
alpha: 'scalar', beta: 'scalar'):
#
D = alpha * accum + beta * c
reduction = reduction_op(D, "column", "Add", args.threadblock_shape[0])
return D, reduction
epilogue_functor = ColumnReduction_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "RowReduction":
class RowReduction_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
alpha: 'scalar', beta: 'scalar'):
#
D = alpha * accum + tanh.numpy(beta * c)
reduction = reduction_op(D, "row", "Add", args.threadblock_shape[1])
return D, reduction
epilogue_functor = RowReduction_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "RowBroadcast":
class RowBroadcast_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
vector: 'row', alpha: 'scalar', beta: 'scalar'):
#
T = accum + vector
scale_T = alpha * T
Z = relu.numpy(scale_T + beta * c)
return Z, T
epilogue_functor = RowBroadcast_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "ColumnBroadcast":
class ColumnBroadcast_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
vector: 'column', alpha: 'scalar', beta: 'scalar'):
#
T = accum + vector
scale_T = leaky_relu.numpy(alpha * T, 0.2)
Z = scale_T + beta * c
return Z, T
epilogue_functor = ColumnBroadcast_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
else:
epilogue_functor = epilogue_functor
operation = GemmOperationUniversal(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
visitor=visitor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
if args.gemm_mode == "GemmSplitKParallel":
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
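# In parallel split-K mode, the GEMM kernel writes one partial result per K-partition
# to a workspace; the ReductionOperation constructed above then sums those partials,
# applying alpha and beta against the source tensor, to produce the final D.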
pycutlass.compiler.add_module(operations)
# User-provided inputs
problem_size = cutlass.gemm.GemmCoord(
args.problem_size[0], args.problem_size[1], args.problem_size[2])
tensor_a_size = args.batch * problem_size.m() * problem_size.k()
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(bfloat16)
else:
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(
low=-2, high=2,size=(tensor_a_size,)
).astype(getattr(np, args.element_a))
tensor_b_size = args.batch * problem_size.k() * problem_size.n()
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(bfloat16)
else:
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(
low=-2, high=2, size=(tensor_b_size,)
).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
tensor_c_size = args.batch * problem_size.n()
elif args.layout_c == "ColumnMajor":
tensor_c_size = args.batch * problem_size.m()
else:
raise ValueError(args.layout_c)
else:
tensor_c_size = args.batch * problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
if args.epilogue_visitor == "RowReduction":
cta_n = args.threadblock_shape[1]
num_cta_n = (problem_size.n() + cta_n - 1) // cta_n
reduction = np.zeros(shape=(args.batch * problem_size.m() * num_cta_n,), dtype=getattr(np, args.element_c))
output_op = operation.epilogue_type(
D=tensor_D, alpha=args.alpha, beta=args.beta, c=tensor_C, reduction=reduction, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "ColumnReduction":
cta_m = args.threadblock_shape[0]
num_cta_m = (problem_size.m() + cta_m - 1) // cta_m
reduction = np.zeros(shape=(args.batch * problem_size.n() * num_cta_m,), dtype=getattr(np, args.element_c))
output_op = operation.epilogue_type(
D=tensor_D, alpha=args.alpha, beta=args.beta, c=tensor_C, reduction=reduction, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "RowBroadcast":
vector = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(args.batch, 1, problem_size.n()))
).astype(getattr(np, args.element_c))
tensor_t = np.empty_like(tensor_D)
output_op = operation.epilogue_type(
c=tensor_C, vector=vector, alpha=args.alpha, beta=args.beta, Z=tensor_D, T=tensor_t, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "ColumnBroadcast":
vector = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(args.batch, problem_size.m(), 1))
).astype(getattr(np, args.element_c))
tensor_t = np.empty_like(tensor_D)
output_op = operation.epilogue_type(
c=tensor_C, vector=vector, alpha=args.alpha, beta=args.beta, Z=tensor_D, T=tensor_t, problem_size=[problem_size.m(), problem_size.n()]
)
else:
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=output_op,
gemm_mode=getattr(cutlass.gemm.Mode, args.gemm_mode),
split_k_slices=args.split_k_slices, batch=args.batch
)
if args.gemm_mode == "GemmSplitKParallel":
reduction_arguments = ReductionArguments(
operation=reduction_operation,
problem_size=[problem_size.m(), problem_size.n()],
partitions=args.split_k_slices, workspace=arguments.ptr_D,
destination=tensor_D, source=tensor_C,
output_op=reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.gemm_mode == "GemmSplitKParallel":
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
# run the host reference module
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, args.alpha, args.beta, args.bias, args.batch)
if args.epilogue_visitor in ["RowBroadcast", "ColumnBroadcast"]:
tensor_D_ref = (tensor_D_ref.reshape((args.batch, problem_size.m(), problem_size.n())) + vector).flatten()
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
if args.epilogue_visitor in ["RowReduction", "ColumnReduction"]:
output_op.sync()
accum_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, 1.0, 0.0, args.bias, args.batch)
tensor_D_ref, reduction_ref = epilogue_functor(
accum_ref.reshape((args.batch, problem_size.m(), problem_size.n())),
tensor_C.reshape((args.batch, problem_size.m(), problem_size.n())),
args.alpha, args.beta
)
tensor_D_ref = tensor_D_ref.flatten()
reduction_ref = reduction_ref.flatten()
assert np.allclose(reduction_ref, reduction, atol=1e-2)
elif args.epilogue_visitor in ["RowBroadcast", "ColumnBroadcast"]:
output_op.sync()
accum_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, 1.0, 0.0, args.bias, args.batch)
tensor_D_ref, tensor_T_ref = epilogue_functor(
accum_ref.reshape((args.batch, problem_size.m(), problem_size.n())),
tensor_C.reshape((args.batch, problem_size.m(), problem_size.n())),
vector, args.alpha, args.beta)
tensor_D_ref = tensor_D_ref.flatten()
tensor_T_ref = tensor_T_ref.flatten()
assert np.array_equal(tensor_t, tensor_T_ref)
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import pycutlass
from pycutlass import *
from pycutlass.conv2d_operation import *
from pycutlass.utils import reference_model
from pycutlass.utils.device import device_cc
import cutlass
import sys
import torch
import torch.nn.functional as F
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS convolution 2d kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorC32RSK32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'],
help='Data type of computation in the epilogue')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8",
"HorizontalSwizzle", "StridedDgradIdentitySwizzle1", "StridedDgradIdentitySwizzle4",
"StridedDgradHorizontalSwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# conv related
parser.add_argument("-co", "--conv_kind", default="fprop", type=str, choices=['fprop', 'dgrad', 'wgrad'],
help="The type of convolution: forward propagation (fprop), \
gradient of activation (dgrad), gradient of weight (wgrad)")
parser.add_argument("-st", "--stride_support", default="Strided", type=str, choices=["Strided", "Unity"],
)
parser.add_argument("-ia", "--iterator_algorithm", default="analytic", type=str,
choices=["analytic", "optimized", "fixed_channels", "few_channels"],
help="This option describes iterator algorithm")
# arguments
parser.add_argument("-sm", "--split_k_mode", default="Serial", type=str, choices=["Serial", "Parallel"],
help="Split K Mode. Serial is used for non-splitK or serial-splitK.\
Parallel is used for parallel splitK.")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument("-nhwc", "--nhwc", nargs=4, type=int, help="input size (NHWC)")
parser.add_argument("-krsc", "--krsc", nargs=4, type=int, help="filter size (KRSC)")
parser.add_argument("-pad", "--pad", nargs=4, type=int, help="padding (pad_h, _, pad_w, _)")
parser.add_argument("-stride", "--stride", nargs=2, type=int, help="stride (stride_h, stride_w)")
parser.add_argument("-dilation", "--dilation", nargs=2, type=int, help="dilation (dilation_h, dilation_w)")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the device's compute capability of {}.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)
element_a = getattr(cutlass, args.element_a)
element_b = getattr(cutlass, args.element_b)
element_c = getattr(cutlass, args.element_c)
element_acc = getattr(cutlass, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass, args.layout_a)
layout_b = getattr(cutlass, args.layout_b)
layout_c = getattr(cutlass, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass, args.element_epilogue)
if (args.activation_function == "identity"
or (args.split_k_mode == "Parallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
iterator_algorithm = getattr(cutlass.conv.IteratorAlgorithm, args.iterator_algorithm)
swizzling_functor = getattr(cutlass, args.swizzling_functor)
stride_support = getattr(StrideSupport, args.stride_support)
conv_kind = getattr(cutlass.conv.Operator, args.conv_kind)
operation = Conv2dOperation(
conv_kind=conv_kind, iterator_algorithm=iterator_algorithm,
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C, stride_support=stride_support,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation,]
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
pycutlass.compiler.add_module(operations)
problem_size = cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(args.nhwc[0], args.nhwc[1], args.nhwc[2], args.nhwc[3]),
cutlass.Tensor4DCoord(args.krsc[0], args.krsc[1], args.krsc[2], args.krsc[3]),
cutlass.Tensor4DCoord(args.pad[0], args.pad[1], args.pad[2], args.pad[3]),
cutlass.MatrixCoord(args.stride[0], args.stride[1]),
cutlass.MatrixCoord(args.dilation[0], args.dilation[1]),
cutlass.conv.Mode.cross_correlation,
args.split_k_slices, 1
)
# User-provided inputs
tensor_A_size = cutlass.conv.implicit_gemm_tensor_a_size(
conv_kind, problem_size
)
tensor_B_size = cutlass.conv.implicit_gemm_tensor_b_size(
conv_kind, problem_size
)
if args.bias:
tensor_C_size = cutlass.conv.implicit_gemm_tensor_c_extent(
conv_kind, problem_size
).at(3)
else:
tensor_C_size = cutlass.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
tensor_D_size = cutlass.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
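# Under the implicit GEMM view used for fprop, the convolution maps to a GEMM with
# M = N*P*Q, N = K, and K = C*R*S; the sizes queried above are simply the element
# counts of the activation (A), filter (B), and output (C/D) tensors, with the bias
# case shrinking C to a single K-length vector.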
if args.element_a != "int8":
tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_A = torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-2, 2)
if args.element_b != "int8":
tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_B = torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-2, 2)
if args.element_c != "int8":
tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_C = torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-2, 2)
tensor_D = torch.ones(size=(tensor_D_size,), dtype=getattr(torch, args.element_c), device="cuda")
arguments = Conv2dArguments(
operation=operation, problem_size=problem_size, A=tensor_A,
B=tensor_B, C=tensor_C, D=tensor_D,
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
split_k_mode=getattr(cutlass.conv.SplitKMode, args.split_k_mode),
split_k_slices=problem_size.split_k_slices
)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
implicit_gemm_size = cutlass.conv.implicit_gemm_problem_size(conv_kind, arguments.problem_size)
reduction_arguments = ReductionArguments(
reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()],
partitions=problem_size.split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op = reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
reference_model = Conv2dReferenceModule(A, B, C, conv_kind)
tensor_D_ref = reference_model.run(tensor_A, tensor_B, tensor_C, arguments.problem_size, args.alpha, args.beta, args.bias)
if (args.activation_function != "identity"):
tensor_D_ref = getattr(F, args.activation_function)(*([tensor_D_ref,] + args.activation_args))
try:
assert torch.equal(tensor_D, tensor_D_ref)
except:
assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2)
print("Passed.")
|
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import pycutlass
from pycutlass import *
from pycutlass.utils.device import device_cc
import cutlass
from bfloat16 import bfloat16
import csv
import sys
import argparse
# parse the arguments
parser = argparse.ArgumentParser(
description="Launch CUTLASS GEMM Grouped kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'], help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
help="This option describes the tile size a thread block with compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle"],
help="This option describes how thread blocks are scheduled on GPU. \
NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels. \
This parameter is passed in at present to match the APIs of other kernels. The parameter \
is unused within the kernel")
# precompute mode
parser.add_argument("-pm", "--precompute_mode",
default="Device", type=str, choices=["Host", "Device"],
help="Grouped Gemm Scheduing on device only (Device) or using host precompute (Host)")
# arguments
parser.add_argument("-p", "--problem_size_dir", type=str,
help="path to the csv file contains the problem sizes")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
help="addition arguments for activation")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the device's compute capability of {}.").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)
element_a = getattr(cutlass, args.element_a)
element_b = getattr(cutlass, args.element_b)
element_c = getattr(cutlass, args.element_c)
element_acc = getattr(cutlass, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass, args.layout_a)
layout_b = getattr(cutlass, args.layout_b)
layout_c = getattr(cutlass, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass, args.element_epilogue)
if args.activation_function == "identity":
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass, args.swizzling_functor)
precompute_mode = getattr(SchedulerMode, args.precompute_mode)
operation = GemmOperationGrouped(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
precompute_mode=precompute_mode
)
if args.print_cuda:
print(operation.rt_module.emit())
pycutlass.compiler.add_module([operation, ])
reference_module = ReferenceModule(A, B, C)
# get problems
problem_sizes = []
with open(args.problem_size_dir) as csv_file:
reader = csv.reader(csv_file)
for row in reader:
problem_sizes.append(
cutlass.gemm.GemmCoord(int(row[0]), int(row[1]), int(row[2]))
)
problem_count = len(problem_sizes)
tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
problem_sizes_coord = []
tensor_D_refs = []
for problem_size in problem_sizes:
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
* problem_size.k(),))).astype(bfloat16)
else:
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
* problem_size.k(),))).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(low=-2, high=2, size=(problem_size.m()
* problem_size.k(),)).astype(getattr(np, args.element_a))
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
* problem_size.n(),))).astype(bfloat16)
else:
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
* problem_size.n(),))).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(low=-2, high=2, size=(problem_size.k()
* problem_size.n(),)).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
c_size = problem_size.n()
elif args.layout_c == "ColumnMajor":
c_size = problem_size.m()
else:
raise ValueError(args.layout_c)
else:
c_size = problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_As.append(tensor_A)
tensor_Bs.append(tensor_B)
tensor_Cs.append(tensor_C)
tensor_Ds.append(tensor_D)
tensor_D_ref = reference_module.run(
tensor_A, tensor_B, tensor_C, problem_size,
args.alpha, args.beta, args.bias)
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
tensor_D_refs.append(tensor_D_ref)
problem_sizes_coord.append(problem_size)
arguments = GemmGroupedArguments(
operation, problem_sizes_coord, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds,
output_op=operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
)
operation.run(arguments)
arguments.sync()
# Verify each device result against the host reference: require exact equality first,
# and fall back to an elementwise tolerance check for rounding differences.
for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs):
    try:
        assert np.array_equal(tensor_d, tensor_d_ref)
    except AssertionError:
        assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5)
print("Passed.")
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
import sys
import subprocess
import argparse
from functools import reduce
from itertools import chain
from pyHIPIFY import hipify_python
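# Example invocation (illustrative only; the script name and paths are placeholders):
#   python hipify_gloo.py --project-directory /path/to/gloo --output-directory /path/to/gloo_hip --root-dir gloo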
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--project-directory',
type=str,
default=os.path.normpath(os.path.join(
os.path.realpath(__file__),
os.pardir,
os.pardir,
os.pardir,
)),
help="The root of the project. (default: %(default)s)",
required=False)
parser.add_argument(
'--output-directory',
type=str,
default='',
help="The Directory to Store the Hipified Project",
required=False)
parser.add_argument(
'--list-files-only',
action='store_true',
help="Only print the list of hipify files.")
parser.add_argument(
'--root-dir',
type=str,
default="gloo",
help="The root directory of gloo project",
required=False)
args = parser.parse_args()
amd_build_dir = os.path.dirname(os.path.realpath(__file__))
proj_dir = os.path.dirname(os.path.dirname(amd_build_dir))
if args.project_directory:
proj_dir = args.project_directory
out_dir = proj_dir
if args.output_directory:
out_dir = args.output_directory
includes = [
os.path.join(args.root_dir, "*cuda*"),
os.path.join(args.root_dir, "*nccl*"),
]
ignores = [
]
hipify_python.hipify(
project_directory=proj_dir,
output_directory=out_dir,
includes=includes,
ignores=ignores,
list_files_only=args.list_files_only,
show_progress=False)
|
import collections
from pyHIPIFY.constants import *
""" Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents
This closely follows the implementation in hipify-clang
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp
and its structure.
There are different maps for fundamental names, include files, identifiers, sparse, and
PyTorch specific translations.
Each of the entries in these maps translates a CUDA string to a tuple containing the
ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is not
supported in ROCm/HIP yet.
"""
CUDA_TYPE_NAME_MAP = collections.OrderedDict([
("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)),
("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)),
("CUDA_ARRAY3D_DESCRIPTOR", ("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_MEMCPY3D_PEER", ("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_POINTER_ATTRIBUTE_P2P_TOKENS", ("HIP_POINTER_ATTRIBUTE_P2P_TOKENS", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_RESOURCE_DESC", ("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_RESOURCE_VIEW_DESC", ("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUipcEventHandle", ("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUarray_cubemap_face", ("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUarray_format", ("hipArray_format", CONV_TYPE, API_DRIVER)),
("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmem_range_attribute", ("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)),
("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)),
("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)),
("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)),
("CUgraphicsResource", ("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmipmappedArray", ("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUfunction_attribute", ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUfunction_attribute_enum", ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUgraphicsMapResourceFlags", ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUgraphicsMapResourceFlags_enum", ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUgraphicsRegisterFlags", ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUgraphicsRegisterFlags_enum", ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUoccupancy_flags", ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUoccupancy_flags_enum", ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUipcMem_flags_enum", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_cacheMode_enum", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_fallback_enum", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjitInputType_enum", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
("CUmemAttach_flags", ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmemAttach_flags_enum", ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CUresourcetype_enum", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)),
("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)),
("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)),
("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)),
("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)),
("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUsurfref", ("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)),
("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)),
("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)),
("CUstreamWaitValue_flags", ("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUstreamWriteValue_flags", ("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUstreamBatchMemOpType", ("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUdevice_P2PAttribute", ("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)),
("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)),
("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)),
("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d9DeviceList", ("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d9map_flags", ("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d9register_flags", ("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d10DeviceList", ("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d10map_flags", ("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d10register_flags", ("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CUd3d11DeviceList", ("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("CUeglStreamConnection_st", ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("CUeglStreamConnection", ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("libraryPropertyType_t", ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("libraryPropertyType", ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)),
("cudaArray", ("hipArray", CONV_MEM, API_RUNTIME)),
("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)),
("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)),
("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)),
("cudaMipmappedArray_const_t", ("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME)),
("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)),
("cudaArrayLayered", ("hipArrayLayered", CONV_MEM, API_RUNTIME)),
("cudaArraySurfaceLoadStore", ("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME)),
("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)),
("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)),
("cudaMemoryAdvise", ("hipMemAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeAttribute", ("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)),
("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)),
("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)),
("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)),
("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)),
("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)),
("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceP2PAttr", ("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaComputeMode", ("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)),
("cudaFuncAttributes", ("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)),
("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)),
("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)),
("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)),
("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)),
("surfaceReference", ("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)),
("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)),
("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)),
("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)),
("cudaSurfaceBoundaryMode", ("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSurfaceFormatMode", ("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)),
("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)),
("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)),
("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)),
("cudaTextureType1DLayered", ("hipTextureType1DLayered", CONV_TEX, API_RUNTIME)),
("cudaTextureType2DLayered", ("hipTextureType2DLayered", CONV_TEX, API_RUNTIME)),
("cudaTextureTypeCubemapLayered", ("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME)),
("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaGraphicsCubeFace", ("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsMapFlags", ("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlags", ("hipGraphicsRegisterFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLDeviceList", ("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9DeviceList", ("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapFlags", ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9RegisterFlags", ("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10DeviceList", ("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10MapFlags", ("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10RegisterFlags", ("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11DeviceList", ("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEglStreamConnection", ("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cublasHandle_t", ("rocblas_handle", CONV_TYPE, API_BLAS)),
("cublasOperation_t", ("rocblas_operation", CONV_TYPE, API_BLAS)),
("cublasStatus_t", ("rocblas_status", CONV_TYPE, API_BLAS)),
("cublasFillMode_t", ("rocblas_fill", CONV_TYPE, API_BLAS)),
("cublasDiagType_t", ("rocblas_diagonal", CONV_TYPE, API_BLAS)),
("cublasSideMode_t", ("rocblas_side", CONV_TYPE, API_BLAS)),
("cublasPointerMode_t", ("rocblas_pointer_mode", CONV_TYPE, API_BLAS)),
("cublasAtomicsMode_t", ("rocblas_atomics_mode", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED)),
("cublasDataType_t", ("rocblas_data_type", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED)),
("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandGenerator_st", ("hiprandGenerator_st", CONV_TYPE, API_RAND)),
("curandGenerator_t", ("hiprandGenerator_t", CONV_TYPE, API_RAND)),
("curandDirectionVectorSet", ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDirectionVectorSet_t", ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandOrdering_t", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistribution_st", ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2V_st", ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistribution_t", ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2V_t", ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistributionShift_st", ("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistributionShift_t", ("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistributionM2Shift_st", ("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDistributionM2Shift_t", ("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2_st", ("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2_t", ("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2K_st", ("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandHistogramM2K_t", ("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDiscreteDistribution_st", ("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND)),
("curandDiscreteDistribution_t", ("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND)),
("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandDirectionVectors32_t", ("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND)),
("curandDirectionVectors64_t", ("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
("curandStateScrambledSobol64_t", ("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandStateSobol64_t", ("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandStateScrambledSobol32_t", ("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)),
("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)),
("curandStatePhilox4_32_10_t", ("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND)),
("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)),
("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)),
("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)),
])
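# A minimal sketch (not part of the upstream pyHIPIFY module) of how an entry in these
# maps can be consumed: look up the HIP replacement for a CUDA token and report whether
# the translation is flagged as unsupported.
def _lookup_hip_replacement(cuda_token, mapping=CUDA_TYPE_NAME_MAP):
    """Return (hip_token, is_unsupported) for a CUDA token, or None if it is unmapped."""
    entry = mapping.get(cuda_token)
    if entry is None:
        return None
    return entry[0], HIP_UNSUPPORTED in entry[3:]
# For example, _lookup_hip_replacement("cudaStream_t") yields ("hipStream_t", False).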
CUDA_INCLUDE_MAP = collections.OrderedDict([
    # since pytorch uses "\b{pattern}\b" as the actual re pattern,
    # patterns listed here have to begin and end with alnum chars
    # (a word-boundary substitution sketch follows this map)
("include <cuda.h", ("include <hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER)),
('include "cuda.h', ('include "hip/hip_runtime.h', CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER)),
("cuda_runtime.h", ("hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_RUNTIME)),
("cuda_runtime_api.h", ("hip/hip_runtime_api.h", CONV_INCLUDE, API_RUNTIME)),
("channel_descriptor.h", ("hip/channel_descriptor.h", CONV_INCLUDE, API_RUNTIME)),
("device_functions.h", ("hip/device_functions.h", CONV_INCLUDE, API_RUNTIME)),
("driver_types.h", ("hip/driver_types.h", CONV_INCLUDE, API_RUNTIME)),
("cuComplex.h", ("hip/hip_complex.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_fp16.h", ("hip/hip_fp16.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_texture_types.h", ("hip/hip_texture_types.h", CONV_INCLUDE, API_RUNTIME)),
("vector_types.h", ("hip/hip_vector_types.h", CONV_INCLUDE, API_RUNTIME)),
("cublas.h", ("rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("cublas_v2.h", ("rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("curand.h", ("hiprand.h", CONV_INCLUDE_CUDA_MAIN_H, API_RAND)),
("curand_kernel.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete2.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_globals.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_lognormal.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mrg32k3a.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_host.h", ("hiprand_mtgp32_host.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_kernel.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32dc_p_11213.h", ("rocrand_mtgp32_11213.h", CONV_INCLUDE, API_RAND)),
("curand_normal.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_normal_static.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_philox4x32_x.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_poisson.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_precalc.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_uniform.h", ("hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("cusparse.h", ("hipsparse.h", CONV_INCLUDE, API_RAND)),
("cufft.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)),
("cufftXt.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)),
])
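# A minimal sketch (an assumption, not the actual hipify_python implementation) of the
# word-boundary substitution described in the comment above: each CUDA pattern is
# wrapped in \b...\b and replaced with its HIP counterpart.
def _hipify_includes(source_text, mapping=CUDA_INCLUDE_MAP):
    import re
    for cuda_pattern, entry in mapping.items():
        hip_replacement = entry[0]
        source_text = re.sub(r"\b{}\b".format(re.escape(cuda_pattern)),
                             hip_replacement, source_text)
    return source_text
# _hipify_includes('#include <cuda_runtime.h>') returns '#include <hip/hip_runtime.h>'.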
CUDA_IDENTIFIER_MAP = collections.OrderedDict([
("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)),
("CUDA_ERROR_INVALID_CONTEXT", ("hipErrorInvalidContext", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_CONTEXT_ALREADY_CURRENT", ("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_ARRAY_IS_MAPPED", ("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_ALREADY_ACQUIRED", ("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_MAPPED_AS_ARRAY", ("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_MAPPED_AS_POINTER", ("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_CONTEXT_ALREADY_IN_USE", ("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", ("hipErrorLaunchIncompatibleTexturing", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", ("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ERROR_CONTEXT_IS_DESTROYED", ("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ERROR_NOT_PERMITTED", ("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ERROR_NOT_SUPPORTED", ("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorMissingConfiguration", ("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorPriorLaunchFailure", ("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidDeviceFunction", ("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidConfiguration", ("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidPitchValue", ("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidSymbol", ("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidHostPointer", ("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidDevicePointer", ("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME)),
("cudaErrorInvalidTexture", ("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidTextureBinding", ("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidChannelDescriptor", ("hipErrorInvalidChannelDescriptor", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidMemcpyDirection", ("hipErrorInvalidMemcpyDirection", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorAddressOfConstant", ("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorTextureFetchFailed", ("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorTextureNotBound", ("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorSynchronizationError", ("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidFilterSetting", ("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidNormSetting", ("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorMixedDeviceExecution", ("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorNotYetImplemented", ("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorMemoryValueTooLarge", ("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInsufficientDriver", ("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorSetOnActiveProcess", ("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorInvalidSurface", ("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorDuplicateVariableName", ("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorDuplicateTextureName", ("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorDuplicateSurfaceName", ("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorDevicesUnavailable", ("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorIncompatibleDriverContext", ("hipErrorIncompatibleDriverContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorDeviceAlreadyInUse", ("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorLaunchMaxDepthExceeded", ("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorLaunchFileScopedTex", ("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorLaunchFileScopedSurf", ("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorSyncDepthExceeded", ("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorLaunchPendingCountExceeded", ("hipErrorLaunchPendingCountExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorNotPermitted", ("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorNotSupported", ("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorStartupFailure", ("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaErrorApiFailureBase", ("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)),
("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_OUT_OF_MEMORY", ("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER)),
("cudaErrorMemoryAllocation", ("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_NOT_INITIALIZED", ("hipErrorNotInitialized", CONV_TYPE, API_DRIVER)),
("cudaErrorInitializationError", ("hipErrorInitializationError", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, API_DRIVER)),
("cudaErrorCudartUnloading", ("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PROFILER_DISABLED", ("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER)),
("cudaErrorProfilerDisabled", ("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PROFILER_NOT_INITIALIZED", ("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER)),
("cudaErrorProfilerNotInitialized", ("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PROFILER_ALREADY_STARTED", ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER)),
("cudaErrorProfilerAlreadyStarted", ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PROFILER_ALREADY_STOPPED", ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER)),
("cudaErrorProfilerAlreadyStopped", ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidKernelImage", ("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)),
("cudaErrorMapBufferObjectFailed", ("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)),
("cudaErrorUnmapBufferObjectFailed", ("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_NO_BINARY_FOR_GPU", ("hipErrorNoBinaryForGpu", CONV_TYPE, API_DRIVER)),
("cudaErrorNoKernelImageForDevice", ("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_ECC_UNCORRECTABLE", ("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER)),
("cudaErrorECCUncorrectable", ("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_UNSUPPORTED_LIMIT", ("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER)),
("cudaErrorUnsupportedLimit", ("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER)),
("cudaErrorPeerAccessUnsupported", ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_INVALID_PTX", ("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidPtx", ("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidGraphicsContext", ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_NVLINK_UNCORRECTABLE", ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorNvlinkUncorrectable", ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", ("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER)),
("cudaErrorSharedObjectSymbolNotFound", ("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER)),
("cudaErrorSharedObjectInitFailed", ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_OPERATING_SYSTEM", ("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER)),
("cudaErrorOperatingSystem", ("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_INVALID_HANDLE", ("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidResourceHandle", ("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)),
("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_ILLEGAL_ADDRESS", ("hipErrorIllegalAddress", CONV_TYPE, API_DRIVER)),
("cudaErrorIllegalAddress", ("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", ("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER)),
("cudaErrorLaunchOutOfResources", ("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)),
("cudaErrorLaunchTimeout", ("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER)),
("cudaErrorPeerAccessAlreadyEnabled", ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER)),
("cudaErrorPeerAccessNotEnabled", ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_ASSERT", ("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorAssert", ("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_TOO_MANY_PEERS", ("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorTooManyPeers", ("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER)),
("cudaErrorHostMemoryAlreadyRegistered", ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER)),
("cudaErrorHostMemoryNotRegistered", ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_HARDWARE_STACK_ERROR", ("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorHardwareStackError", ("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_ILLEGAL_INSTRUCTION", ("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorIllegalInstruction", ("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_MISALIGNED_ADDRESS", ("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorMisalignedAddress", ("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_INVALID_ADDRESS_SPACE", ("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorInvalidAddressSpace", ("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_INVALID_PC", ("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorInvalidPc", ("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_LAUNCH_FAILED", ("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorLaunchFailure", ("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_ERROR_UNKNOWN", ("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)),
("CU_TR_ADDRESS_MODE_WRAP", ("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TR_ADDRESS_MODE_CLAMP", ("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TR_ADDRESS_MODE_MIRROR", ("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TR_ADDRESS_MODE_BORDER", ("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_POSITIVE_X", ("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_NEGATIVE_X", ("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_POSITIVE_Y", ("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_NEGATIVE_Y", ("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_POSITIVE_Z", ("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CUBEMAP_FACE_NEGATIVE_Z", ("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_AD_FORMAT_UNSIGNED_INT8", ("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_UNSIGNED_INT16", ("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_UNSIGNED_INT32", ("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_SIGNED_INT8", ("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_SIGNED_INT16", ("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_SIGNED_INT32", ("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)),
("CU_COMPUTEMODE_DEFAULT", ("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_COMPUTEMODE_EXCLUSIVE", ("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_COMPUTEMODE_PROHIBITED", ("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_COMPUTEMODE_EXCLUSIVE_PROCESS", ("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_SET_READ_MOSTLY", ("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_UNSET_READ_MOSTLY", ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_SET_PREFERRED_LOCATION", ("hipMemAdviseSetPreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION", ("hipMemAdviseUnsetPreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_SET_ACCESSED_BY", ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ADVISE_UNSET_ACCESSED_BY", ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY", ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION", ("hipMemRangeAttributePreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY", ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION", ("hipMemRangeAttributeLastPrefetchLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_SCHED_AUTO", ("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_SCHED_SPIN", ("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_SCHED_YIELD", ("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_SCHED_BLOCKING_SYNC", ("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_BLOCKING_SYNC", ("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_SCHED_MASK", ("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_MAP_HOST", ("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_LMEM_RESIZE_TO_MAX", ("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_CTX_FLAGS_MASK", ("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LAUNCH_PARAM_BUFFER_POINTER", ("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER)),
("CU_LAUNCH_PARAM_BUFFER_SIZE", ("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER)),
("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)),
("CU_IPC_HANDLE_SIZE", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTALLOC_DEVICEMAP", ("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTALLOC_PORTABLE", ("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTALLOC_WRITECOMBINED", ("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTREGISTER_DEVICEMAP", ("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTREGISTER_IOMEMORY", ("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMHOSTREGISTER_PORTABLE", ("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_PARAM_TR_DEFAULT", ("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_LEGACY", ("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_PER_THREAD", ("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TRSA_OVERRIDE_FORMAT", ("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TRSF_NORMALIZED_COORDINATES", ("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TRSF_READ_AS_INTEGER", ("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_2DARRAY", ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_CUBEMAP", ("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_DEPTH_TEXTURE", ("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_LAYERED", ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_SURFACE_LDST", ("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_ARRAY3D_TEXTURE_GATHER", ("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
# ("CUDA_VERSION", ("HIP_VERSION", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK", ("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X", ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y", ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z", ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X", ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y", ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z", ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK", ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK", ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY", ("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_WARP_SIZE", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_PITCH", ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK", ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK", ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CLOCK_RATE", ("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT", ("hipDeviceAttributeTextureAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_GPU_OVERLAP", ("hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT", ("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT", ("hipDeviceAttributeKernelExecTimeout", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_INTEGRATED", ("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY", ("hipDeviceAttributeCanMapHostMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_COMPUTE_MODE", ("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH", ("hipDeviceAttributeMaxTexture1DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH", ("hipDeviceAttributeMaxTexture2DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT", ("hipDeviceAttributeMaxTexture2DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH", ("hipDeviceAttributeMaxTexture3DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT", ("hipDeviceAttributeMaxTexture3DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH", ("hipDeviceAttributeMaxTexture3DDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH", ("hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT", ("hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS", ("hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH", ("hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT", ("hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES", ("hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT", ("hipDeviceAttributeSurfaceAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS", ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_ECC_ENABLED", ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_PCI_BUS_ID", ("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID", ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_TCC_DRIVER", ("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE", ("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH", ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE", ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR", ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT", ("hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING", ("hipDeviceAttributeUnifiedAddressing", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH", ("hipDeviceAttributeMaxTexture1DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS", ("hipDeviceAttributeMaxTexture1DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER", ("hipDeviceAttributeCanTex2DGather", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH", ("hipDeviceAttributeMaxTexture2DGatherWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT", ("hipDeviceAttributeMaxTexture2DGatherHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE", ("hipDeviceAttributeMaxTexture3DWidthAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE", ("hipDeviceAttributeMaxTexture3DHeightAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE", ("hipDeviceAttributeMaxTexture3DDepthAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID", ("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT", ("hipDeviceAttributeTexturePitchAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH", ("hipDeviceAttributeMaxTextureCubemapWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH", ("hipDeviceAttributeMaxTextureCubemapLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS", ("hipDeviceAttributeMaxTextureCubemapLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH", ("hipDeviceAttributeMaxSurface1DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH", ("hipDeviceAttributeMaxSurface2DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT", ("hipDeviceAttributeMaxSurface2DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH", ("hipDeviceAttributeMaxSurface3DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT", ("hipDeviceAttributeMaxSurface3DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH", ("hipDeviceAttributeMaxSurface3DDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH", ("hipDeviceAttributeMaxSurface1DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS", ("hipDeviceAttributeMaxSurface1DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH", ("hipDeviceAttributeMaxSurface2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT", ("hipDeviceAttributeMaxSurface2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS", ("hipDeviceAttributeMaxSurface2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH", ("hipDeviceAttributeMaxSurfaceCubemapWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH", ("hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS", ("hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH", ("hipDeviceAttributeMaxTexture1DLinearWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH", ("hipDeviceAttributeMaxTexture2DLinearWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT", ("hipDeviceAttributeMaxTexture2DLinearHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH", ("hipDeviceAttributeMaxTexture2DLinearPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH", ("hipDeviceAttributeMaxTexture2DMipmappedWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT", ("hipDeviceAttributeMaxTexture2DMipmappedHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR", ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR", ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH", ("hipDeviceAttributeMaxTexture1DMipmappedWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED", ("hipDeviceAttributeStreamPrioritiesSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED", ("hipDeviceAttributeGlobalL1CacheSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED", ("hipDeviceAttributeLocalL1CacheSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", ("hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR", ("hipDeviceAttributeMaxRegistersPerMultiprocessor", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY", ("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD", ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER)),
("CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID", ("hipDeviceAttributeMultiGpuBoardGroupId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED", ("hipDeviceAttributeHostNativeAtomicSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO", ("hipDeviceAttributeSingleToDoublePrecisionPerfRatio", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS", ("hipDeviceAttributePageableMemoryAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS", ("hipDeviceAttributeConcurrentManagedAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED", ("hipDeviceAttributeComputePreemptionSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM", ("hipDeviceAttributeCanUseHostPointerForRegisteredMem", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_ATTRIBUTE_MAX", ("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_CONTEXT", ("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_MEMORY_TYPE", ("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_DEVICE_POINTER", ("hipPointerAttributeDevicePointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_HOST_POINTER", ("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_P2P_TOKENS", ("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_SYNC_MEMOPS", ("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_BUFFER_ID", ("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_POINTER_ATTRIBUTE_IS_MANAGED", ("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", ("hipFuncAttributeMaxThreadsPerBlocks", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", ("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", ("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", ("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_NUM_REGS", ("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_PTX_VERSION", ("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_BINARY_VERSION", ("hipFuncAttributeBinaryVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_CACHE_MODE_CA", ("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_ATTRIBUTE_MAX", ("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE", ("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY", ("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD", ("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_REGISTER_FLAGS_NONE", ("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY", ("hipGraphicsRegisterFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD", ("hipGraphicsRegisterFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST", ("hipGraphicsRegisterFlagsSurfaceLoadStore", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER", ("hipGraphicsRegisterFlagsTextureGather", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_OCCUPANCY_DEFAULT", ("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE", ("hipOccupancyDisableCachingOverride", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_FUNC_CACHE_PREFER_NONE", ("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER)),
("CU_FUNC_CACHE_PREFER_SHARED", ("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER)),
("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)),
("CU_FUNC_CACHE_PREFER_EQUAL", ("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER)),
("CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS", ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)),
("CU_JIT_CACHE_OPTION_NONE", ("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_CACHE_OPTION_CG", ("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_CACHE_OPTION_CA", ("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_PREFER_PTX", ("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_PREFER_BINARY", ("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)),
("CU_JIT_THREADS_PER_BLOCK", ("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER)),
("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)),
("CU_JIT_INFO_LOG_BUFFER", ("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)),
("CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES", ("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER)),
("CU_JIT_ERROR_LOG_BUFFER", ("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER)),
("CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES", ("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER)),
("CU_JIT_OPTIMIZATION_LEVEL", ("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER)),
("CU_JIT_TARGET_FROM_CUCONTEXT", ("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER)),
("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)),
("CU_JIT_FALLBACK_STRATEGY", ("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER)),
("CU_JIT_GENERATE_DEBUG_INFO", ("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER)),
("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)),
("CU_JIT_GENERATE_LINE_INFO", ("hipJitOptionGenerateLineInfo", CONV_JIT, API_DRIVER)),
("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)),
("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)),
("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)),
("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)),
("CU_TARGET_COMPUTE_10", ("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_11", ("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_12", ("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_13", ("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_20", ("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_21", ("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_30", ("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_32", ("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_35", ("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_37", ("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_50", ("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_52", ("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_53", ("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_60", ("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_61", ("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TARGET_COMPUTE_62", ("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_INPUT_CUBIN", ("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_INPUT_PTX", ("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_INPUT_FATBINARY", ("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_INPUT_OBJECT", ("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_INPUT_LIBRARY", ("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_JIT_NUM_INPUT_TYPES", ("hipJitInputTypeNumInputTypes", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LIMIT_STACK_SIZE", ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LIMIT_PRINTF_FIFO_SIZE", ("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LIMIT_MALLOC_HEAP_SIZE", ("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER)),
("CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH", ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT", ("hipLimitDevRuntimePendingLaunchCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_LIMIT_STACK_SIZE", ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ATTACH_GLOBAL", ("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ATTACH_HOST", ("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEM_ATTACH_SINGLE", ("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMORYTYPE_HOST", ("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMORYTYPE_DEVICE", ("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMORYTYPE_ARRAY", ("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_MEMORYTYPE_UNIFIED", ("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_RESOURCE_TYPE_ARRAY", ("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CU_RESOURCE_TYPE_MIPMAPPED_ARRAY", ("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CU_RESOURCE_TYPE_LINEAR", ("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CU_RESOURCE_TYPE_PITCH2D", ("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_1X8", ("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_2X8", ("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_4X8", ("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_1X8", ("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_2X8", ("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_4X8", ("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_1X16", ("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_2X16", ("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_4X16", ("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_1X16", ("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_2X16", ("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_4X16", ("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_1X32", ("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_2X32", ("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UINT_4X32", ("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_1X32", ("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_2X32", ("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SINT_4X32", ("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_1X16", ("hipResViewFormatHalf1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_2X16", ("hipResViewFormatHalf2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_4X16", ("hipResViewFormatHalf4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_1X32", ("hipResViewFormatFloat1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_2X32", ("hipResViewFormatFloat2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_FLOAT_4X32", ("hipResViewFormatFloat4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC1", ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC2", ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC3", ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC4", ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SIGNED_BC4", ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC5", ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SIGNED_BC5", ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC6H", ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_SIGNED_BC6H", ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER)),
("CU_RES_VIEW_FORMAT_UNSIGNED_BC7", ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER)),
("CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE", ("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER)),
("CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE", ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER)),
("CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE", ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER)),
("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)),
("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)),
("CU_STREAM_WAIT_VALUE_GEQ", ("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_WAIT_VALUE_EQ", ("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_WAIT_VALUE_AND", ("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_WAIT_VALUE_FLUSH", ("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_WRITE_VALUE_DEFAULT", ("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER", ("hipStreamWriteValueNoMemoryBarrier", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_MEM_OP_WAIT_VALUE_32", ("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_MEM_OP_WRITE_VALUE_32", ("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES", ("hipStreamBatchMemOpFlushRemoteWrites", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("cuGetErrorName", ("hipGetErrorName___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED)),
("cuGetErrorString", ("hipGetErrorString___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED)),
("cuInit", ("hipInit", CONV_INIT, API_DRIVER)),
("cuDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_DRIVER)),
("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)),
("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetLimit", ("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxGetSharedMemConfig", ("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetStreamPriorityRange", ("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetLimit", ("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxSetSharedMemConfig", ("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)),
("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)),
("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)),
("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)),
("cuDeviceGetP2PAttribute", ("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED)),
("cuDevicePrimaryCtxGetState", ("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER)),
("cuDevicePrimaryCtxRelease", ("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER)),
("cuDevicePrimaryCtxReset", ("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER)),
("cuDevicePrimaryCtxRetain", ("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER)),
("cuDevicePrimaryCtxSetFlags", ("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER)),
("cuDeviceGet", ("hipGetDevice", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)),
("cuDeviceComputeCapability", ("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)),
("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkComplete", ("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)),
("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)),
("cuModuleGetSurfRef", ("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)),
("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)),
("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)),
("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)),
("cuModuleLoadFatBinary", ("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)),
("CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK", ("hipDeviceP2PAttributePerformanceRank", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED", ("hipDeviceP2PAttributeAccessSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED", ("hipDeviceP2PAttributeNativeAtomicSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)),
("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)),
("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)),
("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)),
("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)),
("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)),
("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)),
("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)),
("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)),
("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)),
("cuFuncGetAttribute", ("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)),
("cuFuncSetSharedMemConfig", ("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)),
("cuFuncSetBlockShape", ("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuFuncSetSharedSize", ("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunchGridAsync", ("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSetSize", ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSetSize", ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuOccupancyMaxActiveBlocksPerMultiprocessor", ("hipOccupancyMaxActiveBlocksPerMultiprocessor", CONV_OCCUPANCY, API_DRIVER)),
("cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", ("hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED)),
("cuOccupancyMaxPotentialBlockSize", ("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER)),
("cuOccupancyMaxPotentialBlockSizeWithFlags", ("hipOccupancyMaxPotentialBlockSizeWithFlags", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)),
("cuStreamAttachMemAsync", ("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamCreate", ("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamCreateWithPriority", ("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)),
("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)),
("cuStreamGetPriority", ("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)),
("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)),
("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)),
("cuStreamWaitValue32", ("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamWriteValue32", ("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuStreamBatchMemOp", ("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)),
("cuArray3DGetDescriptor", ("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArrayGetDescriptor", ("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)),
("cuMemAllocHost", ("hipMemAllocHost", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemAllocManaged", ("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemAllocPitch", ("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy2DAsync", ("hipMemcpy2DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy2DUnaligned", ("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy3DAsync", ("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy3DPeer", ("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy3DPeerAsync", ("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoHAsync", ("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyHtoAAsync", ("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyPeerAsync", ("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)),
("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)),
("cuMemGetAddressRange", ("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)),
("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)),
("cuMemHostGetDevicePointer", ("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemHostGetFlags", ("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)),
("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)),
("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD16Async", ("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D16Async", ("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D32_v2", ("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D32Async", ("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD2D8Async", ("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)),
("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)),
("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemsetD8Async", ("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMipmappedArrayCreate", ("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMipmappedArrayDestroy", ("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMipmappedArrayGetLevel", ("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemPrefetchAsync", ("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemRangeGetAttribute", ("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemRangeGetAttributes", ("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuPointerGetAttribute", ("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuPointerGetAttributes", ("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuPointerSetAttribute", ("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)),
("CU_TR_FILTER_MODE_LINEAR", ("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetAddress", ("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetAddressMode", ("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetArray", ("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetBorderColor", ("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetFilterMode", ("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetFlags", ("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetFormat", ("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetMaxAnisotropy", ("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetMipmapFilterMode", ("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetMipmapLevelBias", ("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetMipmapLevelClamp", ("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefGetMipmappedArray", ("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetAddress", ("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetAddress2D", ("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)),
("cuTexRefSetBorderColor", ("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)),
("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, API_DRIVER)),
("cuTexRefSetMaxAnisotropy", ("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetMipmapFilterMode", ("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetMipmapLevelBias", ("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetMipmapLevelClamp", ("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefSetMipmappedArray", ("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexRefDestroy", ("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuSurfRefGetArray", ("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED)),
("cuSurfRefSetArray", ("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexObjectCreate", ("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexObjectDestroy", ("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexObjectGetResourceDesc", ("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexObjectGetResourceViewDesc", ("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuTexObjectGetTextureDesc", ("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuSurfObjectCreate", ("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuSurfObjectDestroy", ("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuSurfObjectGetResourceDesc", ("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsMapResources", ("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsResourceGetMappedMipmappedArray", ("hipGraphicsResourceGetMappedMipmappedArray", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsResourceGetMappedPointer", ("hipGraphicsResourceGetMappedPointer", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsResourceSetMapFlags", ("hipGraphicsResourceSetMapFlags", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsSubResourceGetMappedArray", ("hipGraphicsSubResourceGetMappedArray", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsUnmapResources", ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsUnregisterResource", ("hipGraphicsUnregisterResource", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED)),
("cuProfilerInitialize", ("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED)),
("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)),
("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)),
("CU_GL_DEVICE_LIST_ALL", ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GL_DEVICE_LIST_CURRENT_FRAME", ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GL_DEVICE_LIST_NEXT_FRAME", ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsGLRegisterImage", ("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GL_MAP_RESOURCE_FLAGS_NONE", ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY", ("HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", ("HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLMapBufferObject", ("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLMapBufferObjectAsync", ("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLRegisterBufferObject", ("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLSetBufferObjectMapFlags", ("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLUnmapBufferObject", ("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLUnmapBufferObjectAsync", ("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLUnregisterBufferObject", ("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_DEVICE_LIST_ALL", ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_DEVICE_LIST_CURRENT_FRAME", ("HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_DEVICE_LIST_NEXT_FRAME", ("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9CtxCreate", ("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9CtxCreateOnDevice", ("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9GetDevice", ("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9GetDevices", ("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9GetDirect3DDevice", ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsD3D9RegisterResource", ("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_MAPRESOURCE_FLAGS_NONE", ("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_MAPRESOURCE_FLAGS_READONLY", ("HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", ("HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_REGISTER_FLAGS_NONE", ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D9_REGISTER_FLAGS_ARRAY", ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9MapResources", ("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9RegisterResource", ("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceGetMappedArray", ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceGetMappedPitch", ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceGetMappedPointer", ("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceGetMappedSize", ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceGetSurfaceDimensions", ("hipD3D9ResourceGetSurfaceDimensions", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9ResourceSetMapFlags", ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9UnmapResources", ("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D9UnregisterResource", ("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_DEVICE_LIST_ALL", ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_DEVICE_LIST_CURRENT_FRAME", ("HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_DEVICE_LIST_NEXT_FRAME", ("HIP_D3D10_DEVICE_LIST_NEXT_FRAME", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10GetDevice", ("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10GetDevices", ("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsD3D10RegisterResource", ("hipGraphicsD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_MAPRESOURCE_FLAGS_NONE", ("HIP_D3D10_MAPRESOURCE_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_MAPRESOURCE_FLAGS_READONLY", ("HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", ("HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_REGISTER_FLAGS_NONE", ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D10_REGISTER_FLAGS_ARRAY", ("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10CtxCreate", ("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10CtxCreateOnDevice", ("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10GetDirect3DDevice", ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10MapResources", ("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10RegisterResource", ("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10ResourceGetMappedArray", ("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10ResourceGetMappedPitch", ("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10ResourceGetMappedPointer", ("hipD3D10ResourceGetMappedPointer", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10ResourceGetMappedSize", ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10ResourceGetSurfaceDimensions", ("hipD3D10ResourceGetSurfaceDimensions", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD310ResourceSetMapFlags", ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10UnmapResources", ("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D10UnregisterResource", ("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D11_DEVICE_LIST_ALL", ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D11_DEVICE_LIST_CURRENT_FRAME", ("HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("CU_D3D11_DEVICE_LIST_NEXT_FRAME", ("HIP_D3D11_DEVICE_LIST_NEXT_FRAME", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsD3D11RegisterResource", ("hipGraphicsD3D11RegisterResource", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D11CtxCreate", ("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D11CtxCreateOnDevice", ("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuD3D11GetDirect3DDevice", ("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsVDPAURegisterOutputSurface", ("hipGraphicsVDPAURegisterOutputSurface", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsVDPAURegisterVideoSurface", ("hipGraphicsVDPAURegisterVideoSurface", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED)),
("cuVDPAUGetDevice", ("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED)),
("cuVDPAUCtxCreate", ("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamConsumerAcquireFrame", ("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamConsumerConnect", ("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamConsumerConnectWithFlags", ("hipEGLStreamConsumerConnectWithFlags", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamConsumerDisconnect", ("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamConsumerReleaseFrame", ("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamProducerConnect", ("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamProducerDisconnect", ("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamProducerPresentFrame", ("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuEGLStreamProducerReturnFrame", ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsEGLRegisterImage", ("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGraphicsResourceGetMappedEglFrame", ("hipGraphicsResourceGetMappedEglFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED)),
("cudaDataType_t", ("hipDataType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDataType", ("hipDataType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_16F", ("hipR16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_16F", ("hipC16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32F", ("hipR32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32F", ("hipC32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_64F", ("hipR64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_64F", ("hipC64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_8I", ("hipR8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_8I", ("hipC8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_8U", ("hipR8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_8U", ("hipC8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32I", ("hipR32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32I", ("hipC32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32U", ("hipR32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32U", ("hipC32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("MAJOR_VERSION", ("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("MINOR_VERSION", ("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("PATCH_LEVEL", ("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAttachGlobal", ("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAttachHost", ("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAttachSingle", ("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyDefault", ("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyDisableCachingOverride", ("hipOccupancyDisableCachingOverride", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)),
("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)),
("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)),
("cudaMemcpy3DPeerParms", ("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DArrayToArray", ("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy2DFromArray", ("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy2DFromArrayAsync", ("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy2DToArrayAsync", ("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)),
("cudaMemcpy3DAsync", ("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy3DPeer", ("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpy3DPeerAsync", ("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpyArrayToArray", ("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpyFromArrayAsync", ("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, API_RUNTIME)),
("cudaMemcpyFromSymbolAsync", ("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME)),
("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeGetAttribute", ("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeGetAttributes", ("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseSetReadMostly", ("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseUnsetReadMostly", ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseSetPreferredLocation", ("hipMemAdviseSetPreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseUnsetPreferredLocation", ("hipMemAdviseUnsetPreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseSetAccessedBy", ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemAdviseUnsetAccessedBy", ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeAttributeReadMostly", ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeAttributePreferredLocation", ("hipMemRangeAttributePreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeAttributeAccessedBy", ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemRangeAttributeLastPrefetchLocation", ("hipMemRangeAttributeLastPrefetchLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)),
("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)),
("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)),
("cudaMemcpyDeviceToDevice", ("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME)),
("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)),
("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)),
("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)),
("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)),
("cudaMemset2DAsync", ("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemset3DAsync", ("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemGetInfo", ("hipMemGetInfo", CONV_MEM, API_RUNTIME)),
("cudaArrayGetInfo", ("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaFreeMipmappedArray", ("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetMipmappedArrayLevel", ("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetSymbolAddress", ("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetSymbolSize", ("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMemPrefetchAsync", ("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)),
("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)),
("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)),
("cudaMalloc3DArray", ("hipMalloc3DArray", CONV_MEM, API_RUNTIME)),
("cudaMallocManaged", ("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMallocMipmappedArray", ("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)),
("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)),
("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)),
("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)),
("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)),
("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)),
("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)),
("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)),
("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)),
("cudaHostAllocDefault", ("hipHostMallocDefault", CONV_MEM, API_RUNTIME)),
("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)),
("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)),
("cudaHostAllocWriteCombined", ("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME)),
("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterPortable", ("hipHostRegisterPortable", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterIoMemory", ("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME)),
# ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...),
("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)),
("cudaEventCreateWithFlags", ("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME)),
("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)),
("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)),
("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)),
("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)),
("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)),
("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)),
("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)),
("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)),
("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)),
("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)),
("cudaStreamCreateWithFlags", ("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME)),
("cudaStreamCreateWithPriority", ("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_RUNTIME)),
("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)),
("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)),
("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)),
("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)),
("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)),
("cudaStreamAttachMemAsync", ("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaStreamGetPriority", ("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)),
("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)),
("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)),
("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)),
("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)),
("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)),
("cudaThreadGetCacheConfig", ("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME)),
("cudaThreadGetLimit", ("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaThreadSetCacheConfig", ("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME)),
("cudaThreadSetLimit", ("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)),
("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)),
("cudaDevAttrMaxThreadsPerBlock", ("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxBlockDimX", ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxBlockDimY", ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxBlockDimZ", ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxGridDimX", ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxGridDimY", ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxGridDimZ", ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxSharedMemoryPerBlock", ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrTotalConstantMemory", ("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxPitch", ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxRegistersPerBlock", ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrClockRate", ("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrTextureAlignment", ("hipDeviceAttributeTextureAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrGpuOverlap", ("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMultiProcessorCount", ("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrKernelExecTimeout", ("hipDeviceAttributeKernelExecTimeout", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrIntegrated", ("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrCanMapHostMemory", ("hipDeviceAttributeCanMapHostMemory", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrComputeMode", ("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxTexture1DWidth", ("hipDeviceAttributeMaxTexture1DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DWidth", ("hipDeviceAttributeMaxTexture2DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DHeight", ("hipDeviceAttributeMaxTexture2DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DWidth", ("hipDeviceAttributeMaxTexture3DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DHeight", ("hipDeviceAttributeMaxTexture3DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DDepth", ("hipDeviceAttributeMaxTexture3DDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLayeredWidth", ("hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLayeredHeight", ("hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLayeredLayers", ("hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrSurfaceAlignment", ("hipDeviceAttributeSurfaceAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrConcurrentKernels", ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrEccEnabled", ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrPciDeviceId", ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrTccDriver", ("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMemoryClockRate", ("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrGlobalMemoryBusWidth", ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrL2CacheSize", ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxThreadsPerMultiProcessor", ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrAsyncEngineCount", ("hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrUnifiedAddressing", ("hipDeviceAttributeUnifiedAddressing", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture1DLayeredWidth", ("hipDeviceAttributeMaxTexture1DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture1DLayeredLayers", ("hipDeviceAttributeMaxTexture1DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DGatherWidth", ("hipDeviceAttributeMaxTexture2DGatherWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DGatherHeight", ("hipDeviceAttributeMaxTexture2DGatherHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DWidthAlt", ("hipDeviceAttributeMaxTexture3DWidthAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DHeightAlt", ("hipDeviceAttributeMaxTexture3DHeightAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture3DDepthAlt", ("hipDeviceAttributeMaxTexture3DDepthAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrPciDomainId", ("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrTexturePitchAlignment", ("hipDeviceAttributeTexturePitchAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTextureCubemapWidth", ("hipDeviceAttributeMaxTextureCubemapWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTextureCubemapLayeredWidth", ("hipDeviceAttributeMaxTextureCubemapLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTextureCubemapLayeredLayers", ("hipDeviceAttributeMaxTextureCubemapLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface1DWidth", ("hipDeviceAttributeMaxSurface1DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface2DWidth", ("hipDeviceAttributeMaxSurface2DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface2DHeight", ("hipDeviceAttributeMaxSurface2DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface3DWidth", ("hipDeviceAttributeMaxSurface3DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface3DHeight", ("hipDeviceAttributeMaxSurface3DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface3DDepth", ("hipDeviceAttributeMaxSurface3DDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface1DLayeredWidth", ("hipDeviceAttributeMaxSurface1DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface1DLayeredLayers", ("hipDeviceAttributeMaxSurface1DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface2DLayeredWidth", ("hipDeviceAttributeMaxSurface2DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface2DLayeredHeight", ("hipDeviceAttributeMaxSurface2DLayeredHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurface2DLayeredLayers", ("hipDeviceAttributeMaxSurface2DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurfaceCubemapWidth", ("hipDeviceAttributeMaxSurfaceCubemapWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurfaceCubemapLayeredWidth", ("hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSurfaceCubemapLayeredLayers", ("hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture1DLinearWidth", ("hipDeviceAttributeMaxTexture1DLinearWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLinearWidth", ("hipDeviceAttributeMaxTexture2DLinearWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLinearHeight", ("hipDeviceAttributeMaxTexture2DLinearHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DLinearPitch", ("hipDeviceAttributeMaxTexture2DLinearPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DMipmappedWidth", ("hipDeviceAttributeMaxTexture2DMipmappedWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxTexture2DMipmappedHeight", ("hipDeviceAttributeMaxTexture2DMipmappedHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrComputeCapabilityMajor", ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrComputeCapabilityMinor", ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxTexture1DMipmappedWidth", ("hipDeviceAttributeMaxTexture1DMipmappedWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrStreamPrioritiesSupported", ("hipDeviceAttributeStreamPrioritiesSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrGlobalL1CacheSupported", ("hipDeviceAttributeGlobalL1CacheSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrLocalL1CacheSupported", ("hipDeviceAttributeLocalL1CacheSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrMaxSharedMemoryPerMultiprocessor", ("hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMaxRegistersPerMultiprocessor", ("hipDeviceAttributeMaxRegistersPerMultiprocessor", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrManagedMemory", ("hipDeviceAttributeManagedMemory", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrIsMultiGpuBoard", ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME)),
("cudaDevAttrMultiGpuBoardGroupID", ("hipDeviceAttributeMultiGpuBoardGroupID", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrHostNativeAtomicSupported", ("hipDeviceAttributeHostNativeAtomicSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrSingleToDoublePrecisionPerfRatio", ("hipDeviceAttributeSingleToDoublePrecisionPerfRatio", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrPageableMemoryAccess", ("hipDeviceAttributePageableMemoryAccess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrConcurrentManagedAccess", ("hipDeviceAttributeConcurrentManagedAccess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrComputePreemptionSupported", ("hipDeviceAttributeComputePreemptionSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevAttrCanUseHostPointerForRegisteredMem", ("hipDeviceAttributeCanUseHostPointerForRegisteredMem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaPointerGetAttributes", ("hipPointerGetAttributes", CONV_MEM, API_RUNTIME)),
("cudaHostGetDevicePointer", ("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME)),
("cudaGetDeviceProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceGetStreamPriorityRange", ("hipDeviceGetStreamPriorityRange", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSetValidDevices", ("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevP2PAttrPerformanceRank", ("hipDeviceP2PAttributePerformanceRank", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevP2PAttrAccessSupported", ("hipDeviceP2PAttributeAccessSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDevP2PAttrNativeAtomicSupported", ("hipDeviceP2PAttributeNativeAtomicSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceGetP2PAttribute", ("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaComputeModeDefault", ("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaComputeModeExclusive", ("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaComputeModeProhibited", ("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaComputeModeExclusiveProcess", ("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetDeviceFlags", ("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)),
("cudaDeviceBlockingSync", ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleBlockingSync", ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleMask", ("hipDeviceScheduleMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)),
("cudaDeviceLmemResizeToMax", ("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceSetCacheConfig", ("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME)),
("cudaDeviceGetCacheConfig", ("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME)),
("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)),
("cudaFuncCachePreferNone", ("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME)),
("cudaFuncCachePreferShared", ("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME)),
("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)),
("cudaFuncCachePreferEqual", ("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME)),
("cudaFuncGetAttributes", ("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaFuncSetSharedMemConfig", ("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetParameterBuffer", ("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSetDoubleForDevice", ("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSetDoubleForHost", ("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaConfigureCall", ("hipConfigureCall", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaSetupArgument", ("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)),
("cudaRuntimeGetVersion", ("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyMaxPotentialBlockSize", ("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME)),
("cudaOccupancyMaxPotentialBlockSizeWithFlags", ("hipOccupancyMaxPotentialBlockSizeWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyMaxActiveBlocksPerMultiprocessor", ("hipOccupancyMaxActiveBlocksPerMultiprocessor", CONV_OCCUPANCY, API_RUNTIME)),
("cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", ("hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyMaxPotentialBlockSizeVariableSMem", ("hipOccupancyMaxPotentialBlockSizeVariableSMem", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", ("hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)),
("cudaDeviceDisablePeerAccess", ("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME)),
("cudaDeviceEnablePeerAccess", ("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME)),
("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)),
("cudaIpcMemLazyEnablePeerAccess", ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME)),
("cudaDeviceSetSharedMemConfig", ("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceGetSharedMemConfig", ("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME)),
("cudaSharedMemBankSizeDefault", ("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME)),
("cudaSharedMemBankSizeFourByte", ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME)),
("cudaSharedMemBankSizeEightByte", ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME)),
("cudaLimitStackSize", ("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaLimitPrintfFifoSize", ("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)),
("cudaLimitDevRuntimeSyncDepth", ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaLimitDevRuntimePendingLaunchCount", ("hipLimitDevRuntimePendingLaunchCount", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)),
("cudaProfilerInitialize", ("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)),
("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)),
("cudaKeyValuePair", ("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)),
("cudaReadModeNormalizedFloat", ("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME)),
("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)),
("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)),
("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)),
("cudaUnbindTexture", ("hipUnbindTexture", CONV_TEX, API_RUNTIME)),
("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)),
("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)),
("cudaBindTextureToMipmappedArray", ("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME)),
("cudaGetTextureAlignmentOffset", ("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME)),
("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKindSigned", ("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKindUnsigned", ("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKindFloat", ("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKindNone", ("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME)),
("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)),
("cudaResourceTypeMipmappedArray", ("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME)),
("cudaResourceTypeLinear", ("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)),
("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedChar1", ("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedChar2", ("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedChar4", ("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedChar1", ("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedChar2", ("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedChar4", ("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedShort1", ("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedShort2", ("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedShort4", ("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedShort1", ("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedShort2", ("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedShort4", ("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedInt1", ("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedInt2", ("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedInt4", ("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedInt1", ("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedInt2", ("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedInt4", ("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed1", ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed2", ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed3", ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed4", ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedBlockCompressed4", ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed5", ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedBlockCompressed5", ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed6H", ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatSignedBlockCompressed6H", ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatUnsignedBlockCompressed7", ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME)),
("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)),
("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)),
("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)),
("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)),
("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)),
("cudaDestroyTextureObject", ("hipDestroyTextureObject", CONV_TEX, API_RUNTIME)),
("cudaGetTextureObjectResourceDesc", ("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME)),
("cudaGetTextureObjectResourceViewDesc", ("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME)),
("cudaGetTextureObjectTextureDesc", ("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME)),
("cudaBindSurfaceToArray", ("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetSurfaceReference", ("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaBoundaryModeZero", ("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaBoundaryModeClamp", ("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaBoundaryModeTrap", ("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaFormatModeForced", ("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaFormatModeAuto", ("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaCreateSurfaceObject", ("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDestroySurfaceObject", ("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGetSurfaceObjectResourceDesc", ("hipGetSurfaceObjectResourceDesc", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaGLGetDevices", ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsGLRegisterImage", ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsMapResources", ("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsResourceGetMappedMipmappedArray", ("hipGraphicsResourceGetMappedMipmappedArray", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsResourceGetMappedPointer", ("hipGraphicsResourceGetMappedPointer", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsResourceSetMapFlags", ("hipGraphicsResourceSetMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsSubResourceGetMappedArray", ("hipGraphicsSubResourceGetMappedArray", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsUnmapResources", ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsUnregisterResource", ("hipGraphicsUnregisterResource", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFacePositiveX", ("hipGraphicsCubeFacePositiveX", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFaceNegativeX", ("hipGraphicsCubeFaceNegativeX", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFacePositiveY", ("hipGraphicsCubeFacePositiveY", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFaceNegativeY", ("hipGraphicsCubeFaceNegativeY", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFacePositiveZ", ("hipGraphicsCubeFacePositiveZ", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsCubeFaceNegativeZ", ("hipGraphicsCubeFaceNegativeZ", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsMapFlagsNone", ("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsMapFlagsReadOnly", ("hipGraphicsMapFlagsReadOnly", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsMapFlagsWriteDiscard", ("hipGraphicsMapFlagsWriteDiscard", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlagsNone", ("hipGraphicsRegisterFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlagsReadOnly", ("hipGraphicsRegisterFlagsReadOnly", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlagsWriteDiscard", ("hipGraphicsRegisterFlagsWriteDiscard", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlagsSurfaceLoadStore", ("hipGraphicsRegisterFlagsSurfaceLoadStore", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsRegisterFlagsTextureGather", ("hipGraphicsRegisterFlagsTextureGather", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLDeviceListAll", ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLDeviceListCurrentFrame", ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLDeviceListNextFrame", ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLGetDevices", ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsGLRegisterImage", ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapFlagsNone", ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapFlagsReadOnly", ("HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapFlagsWriteDiscard", ("HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapBufferObject", ("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLMapBufferObjectAsync", ("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLRegisterBufferObject", ("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLSetBufferObjectMapFlags", ("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLSetGLDevice", ("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLUnmapBufferObject", ("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLUnmapBufferObjectAsync", ("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGLUnregisterBufferObject", ("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9DeviceListAll", ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9DeviceListCurrentFrame", ("HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9DeviceListNextFrame", ("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9GetDevice", ("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9GetDevices", ("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9GetDirect3DDevice", ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9SetDirect3DDevice", ("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsD3D9RegisterResource", ("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapFlags", ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapFlagsNone", ("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapFlagsReadOnly", ("HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapFlagsWriteDiscard", ("HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9RegisterFlagsNone", ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9RegisterFlagsArray", ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9MapResources", ("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9RegisterResource", ("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceGetMappedArray", ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceGetMappedPitch", ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceGetMappedPointer", ("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceGetMappedSize", ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceGetSurfaceDimensions", ("hipD3D9ResourceGetSurfaceDimensions", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9ResourceSetMapFlags", ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9UnmapResources", ("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D9UnregisterResource", ("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10DeviceListAll", ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10DeviceListCurrentFrame", ("HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10DeviceListNextFrame", ("HIP_D3D10_DEVICE_LIST_NEXT_FRAME", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10GetDevice", ("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10GetDevices", ("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsD3D10RegisterResource", ("hipGraphicsD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10MapFlagsNone", ("HIP_D3D10_MAPRESOURCE_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10MapFlagsReadOnly", ("HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10MapFlagsWriteDiscard", ("HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10RegisterFlagsNone", ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10RegisterFlagsArray", ("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10GetDirect3DDevice", ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10MapResources", ("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10RegisterResource", ("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceGetMappedArray", ("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceGetMappedPitch", ("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceGetMappedPointer", ("hipD3D10ResourceGetMappedPointer", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceGetMappedSize", ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceGetSurfaceDimensions", ("hipD3D10ResourceGetSurfaceDimensions", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10ResourceSetMapFlags", ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10SetDirect3DDevice", ("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10UnmapResources", ("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D10UnregisterResource", ("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11DeviceListAll", ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11DeviceListCurrentFrame", ("HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11DeviceListNextFrame", ("HIP_D3D11_DEVICE_LIST_NEXT_FRAME", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsD3D11RegisterResource", ("hipGraphicsD3D11RegisterResource", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsD3D11RegisterResource", ("hipGraphicsD3D11RegisterResource", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsVDPAURegisterOutputSurface", ("hipGraphicsVDPAURegisterOutputSurface", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsVDPAURegisterVideoSurface", ("hipGraphicsVDPAURegisterVideoSurface", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaVDPAUGetDevice", ("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaVDPAUSetVDPAUDevice", ("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamConsumerAcquireFrame", ("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamConsumerConnect", ("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamConsumerConnectWithFlags", ("hipEGLStreamConsumerConnectWithFlags", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamConsumerReleaseFrame", ("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamProducerConnect", ("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamProducerDisconnect", ("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamProducerPresentFrame", ("hipEGLStreamProducerPresentFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaEGLStreamProducerReturnFrame", ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsEGLRegisterImage", ("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaGraphicsResourceGetMappedEglFrame", ("hipGraphicsResourceGetMappedEglFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED)),
("cublasInit", ("rocblas_init", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasShutdown", ("rocblas_shutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGetVersion", ("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGetError", ("rocblas_get_error", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasAlloc", ("rocblas_alloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasFree", ("rocblas_free", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetKernelStream", ("rocblas_set_kernel_stream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGetAtomicsMode", ("rocblas_get_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetAtomicsMode", ("rocblas_set_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGetMathMode", ("rocblas_get_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetMathMode", ("rocblas_set_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_OP_N", ("rocblas_operation_none", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_OP_T", ("rocblas_operation_transpose", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_OP_C", ("rocblas_operation_conjugate_transpose", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_SUCCESS", ("rocblas_status_success", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_NOT_INITIALIZED", ("rocblas_status_invalid_handle", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_ALLOC_FAILED", ("rocblas_status_memory_error", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_INVALID_VALUE", ("rocblas_status_invalid_pointer", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_MAPPING_ERROR", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_EXECUTION_FAILED", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_INTERNAL_ERROR", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_NOT_SUPPORTED", ("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_STATUS_ARCH_MISMATCH", ("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_FILL_MODE_LOWER", ("rocblas_fill_lower", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_FILL_MODE_UPPER", ("rocblas_fill_upper", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_DIAG_NON_UNIT", ("rocblas_diagonal_non_unit", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_DIAG_UNIT", ("rocblas_diagonal_unit", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_LEFT", ("rocblas_side_left", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_RIGHT", ("rocblas_side_right", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_POINTER_MODE_HOST", ("rocblas_pointer_mode_host", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_POINTER_MODE_DEVICE", ("rocblas_pointer_mode_device", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_ATOMICS_NOT_ALLOWED", ("rocblas_atomics_not_allowed", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_ATOMICS_ALLOWED", ("rocblas_atomics_allowed", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_DATA_FLOAT", ("rocblas_precision_float", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_DATA_DOUBLE", ("rocblas_precision_double", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_DATA_HALF", ("rocblas_precision_half", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("CUBLAS_DATA_INT8", ("rocblas_precision_int8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("cublasCreate", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasSetVector", ("rocblas_set_vector", CONV_MATH_FUNC, API_BLAS)),
("cublasGetVector", ("rocblas_get_vector", CONV_MATH_FUNC, API_BLAS)),
("cublasSetVectorAsync", ("rocblas_set_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGetVectorAsync", ("rocblas_get_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetMatrix", ("rocblas_set_matrix", CONV_MATH_FUNC, API_BLAS)),
("cublasGetMatrix", ("rocblas_get_matrix", CONV_MATH_FUNC, API_BLAS)),
("cublasGetMatrixAsync", ("rocblas_get_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetMatrixAsync", ("rocblas_set_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasXerbla", ("rocblas_xerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSnrm2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasScnrm2", ("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDznrm2", ("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasNrm2Ex", ("rocblas_nrm2_ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSdot", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)),
("cublasSdotBatched", ("rocblas_sdot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDdot", ("rocblas_ddot", CONV_MATH_FUNC, API_BLAS)),
("cublasDdotBatched", ("rocblas_ddot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdotu", ("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdotc", ("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdotu", ("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdotc", ("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSscal", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)),
("cublasSscalBatched", ("rocblas_sscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDscal", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)),
("cublasDscalBatched", ("rocblas_dscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCscal", ("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsscal", ("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZscal", ("rocblas_zscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdscal", ("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasSaxpyBatched", ("rocblas_saxpy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDaxpy", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasCaxpy", ("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZaxpy", ("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScopy", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)),
("cublasScopyBatched", ("rocblas_scopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDcopy", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)),
("cublasDcopyBatched", ("rocblas_dcopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCcopy", ("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZcopy", ("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSswap", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)),
("cublasCswap", ("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZswap", ("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamax", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamax", ("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamax", ("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamin", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamin", ("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamin", ("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSasum", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)),
("cublasSasumBatched", ("rocblas_sasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDasum", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)),
("cublasDasumBatched", ("rocblas_dasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScasum", ("rocblas_scasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDzasum", ("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrot", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsrot", ("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrot", ("rocblas_zrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdrot", ("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotg", ("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotg", ("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrotg", ("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrotg", ("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotm", ("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotm", ("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotmg", ("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotmg", ("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemv", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasSgemvBatched", ("rocblas_sgemv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgemv", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemv", ("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemv", ("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgbmv", ("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgbmv", ("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgbmv", ("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgbmv", ("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmv", ("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmv", ("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmv", ("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmv", ("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbmv", ("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbmv", ("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbmv", ("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbmv", ("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpmv", ("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpmv", ("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpmv", ("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpmv", ("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsv", ("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsv", ("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsv", ("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsv", ("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpsv", ("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpsv", ("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpsv", ("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpsv", ("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbsv", ("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbsv", ("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbsv", ("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbsv", ("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymv", ("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymv", ("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymv", ("rocblas_csymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymv", ("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemv", ("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemv", ("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsbmv", ("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsbmv", ("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChbmv", ("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhbmv", ("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspmv", ("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspmv", ("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpmv", ("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpmv", ("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSger", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)),
("cublasDger", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeru", ("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgerc", ("rocblas_cgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeru", ("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgerc", ("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS)),
("cublasDsyr", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS)),
("cublasCher", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2", ("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2", ("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2", ("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2", ("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr2", ("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr2", ("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr2", ("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr2", ("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemmBatched", ("rocblas_sgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgemmBatched", ("rocblas_dgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasHgemmBatched", ("rocblas_hgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemmStridedBatched", ("rocblas_sgemm_strided_batched", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemmStridedBatched", ("rocblas_dgemm_strided_batched", CONV_MATH_FUNC, API_BLAS)),
("cublasHgemmStridedBatched", ("rocblas_hgemm_strided_batched", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemmBatched", ("rocblas_cgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemm3mBatched", ("rocblas_cgemm_3m_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemmBatched", ("rocblas_zgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemmStridedBatched", ("rocblas_cgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemm3mStridedBatched", ("rocblas_cgemm_3m_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemmStridedBatched", ("rocblas_zgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasHgemmStridedBatched", ("rocblas_hgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemm", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemm", ("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasZgemm", ("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasHgemm", ("rocblas_hgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasSsyrk", ("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrk", ("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrk", ("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrk", ("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherk", ("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherk", ("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2k", ("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2k", ("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr2k", ("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr2k", ("rocblas_zyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyrkx", ("rocblas_ssyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrkx", ("rocblas_dsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrkx", ("rocblas_csyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrkx", ("rocblas_zsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2k", ("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2k", ("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherkx", ("rocblas_cherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherkx", ("rocblas_zherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymm", ("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymm", ("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymm", ("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymm", ("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemm", ("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemm", ("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsm", ("rocblas_strsm", CONV_MATH_FUNC, API_BLAS)),
("cublasDtrsm", ("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS)),
("cublasCtrsm", ("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsm", ("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsmBatched", ("rocblas_strsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsmBatched", ("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsmBatched", ("rocblas_ctrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsmBatched", ("rocblas_ztrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmm", ("rocblas_strmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmm", ("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmm", ("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmm", ("rocblas_ztrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgeam", ("rocblas_sgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasDgeam", ("rocblas_dgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeam", ("rocblas_cgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeam", ("rocblas_zgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgetrfBatched", ("rocblas_sgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgetrfBatched", ("rocblas_dgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgetrfBatched", ("rocblas_cgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgetrfBatched", ("rocblas_zgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgetriBatched", ("rocblas_sgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgetriBatched", ("rocblas_dgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgetriBatched", ("rocblas_cgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgetriBatched", ("rocblas_zgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgetrsBatched", ("rocblas_sgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgetrsBatched", ("rocblas_dgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgetrsBatched", ("rocblas_cgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgetrsBatched", ("rocblas_zgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsmBatched", ("rocblas_strsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsmBatched", ("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsmBatched", ("rocblas_ctrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsmBatched", ("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSmatinvBatched", ("rocblas_smatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDmatinvBatched", ("rocblas_dmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCmatinvBatched", ("rocblas_cmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZmatinvBatched", ("rocblas_zmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgeqrfBatched", ("rocblas_sgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgeqrfBatched", ("rocblas_dgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgeqrfBatched", ("rocblas_cgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeqrfBatched", ("rocblas_zgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgelsBatched", ("rocblas_sgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgelsBatched", ("rocblas_dgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgelsBatched", ("rocblas_cgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgelsBatched", ("rocblas_zgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSdgmm", ("rocblas_sdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDdgmm", ("rocblas_ddgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdgmm", ("rocblas_cdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdgmm", ("rocblas_zdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpttr", ("rocblas_stpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpttr", ("rocblas_dtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpttr", ("rocblas_ctpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpttr", ("rocblas_ztpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrttp", ("rocblas_strttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrttp", ("rocblas_dtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrttp", ("rocblas_ctrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrttp", ("rocblas_ztrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCreate_v2", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy_v2", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasGetVersion_v2", ("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSetStream", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasSetStream_v2", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream_v2", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetPointerMode", ("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS)),
("cublasSetPointerMode", ("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS)),
("cublasGetPointerMode_v2", ("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS)),
("cublasSetPointerMode_v2", ("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS)),
("cublasSgemv_v2", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemv_v2", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemv_v2", ("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemv_v2", ("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgbmv_v2", ("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgbmv_v2", ("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgbmv_v2", ("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgbmv_v2", ("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmv_v2", ("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmv_v2", ("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmv_v2", ("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmv_v2", ("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbmv_v2", ("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbmv_v2", ("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbmv_v2", ("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbmv_v2", ("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpmv_v2", ("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpmv_v2", ("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpmv_v2", ("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpmv_v2", ("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsv_v2", ("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsv_v2", ("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsv_v2", ("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsv_v2", ("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpsv_v2", ("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpsv_v2", ("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpsv_v2", ("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpsv_v2", ("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbsv_v2", ("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbsv_v2", ("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbsv_v2", ("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbsv_v2", ("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymv_v2", ("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymv_v2", ("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymv_v2", ("rocblas_csymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymv_v2", ("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemv_v2", ("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemv_v2", ("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsbmv_v2", ("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsbmv_v2", ("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChbmv_v2", ("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhbmv_v2", ("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspmv_v2", ("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspmv_v2", ("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpmv_v2", ("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpmv_v2", ("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSger_v2", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)),
("cublasDger_v2", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeru_v2", ("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgerc_v2", ("rocblas_cergc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeru_v2", ("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgerc_v2", ("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr_v2", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr_v2", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr_v2", ("rocblas_csyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr_v2", ("rocblas_zsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher_v2", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher_v2", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr_v2", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr_v2", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr_v2", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr_v2", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2_v2", ("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2_v2", ("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr2_v2", ("rocblas_csyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr2_v2", ("rocblas_zsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2_v2", ("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2_v2", ("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr2_v2", ("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr2_v2", ("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr2_v2", ("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr2_v2", ("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemm_v2", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm_v2", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemm_v2", ("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemm3m", ("rocblas_cgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemm3mEx", ("rocblas_cgemm_3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemm_v2", ("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemm3m", ("rocblas_zgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
# NB: The function rocblas_sgemmex doesn't actually exist in
# rocblas, as of 2018-12-05
("cublasSgemmEx", ("rocblas_sgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasGemmEx", ("rocblas_gemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgemmEx", ("rocblas_cgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasUint8gemmBias", ("rocblas_uint8gemmbias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyrk_v2", ("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrk_v2", ("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrk_v2", ("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrk_v2", ("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrkEx", ("rocblas_csyrkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrk3mEx", ("rocblas_csyrk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherk_v2", ("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherkEx", ("rocblas_cherkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherk3mEx", ("rocblas_cherk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherk_v2", ("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2k_v2", ("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2k_v2", ("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr2k_v2", ("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr2k_v2", ("rocblas_zsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2k_v2", ("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2k_v2", ("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymm_v2", ("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymm_v2", ("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymm_v2", ("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymm_v2", ("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemm_v2", ("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemm_v2", ("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsm_v2", ("rocblas_strsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsm_v2", ("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsm_v2", ("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsm_v2", ("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmm_v2", ("rocblas_strmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmm_v2", ("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmm_v2", ("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmm_v2", ("rocblas_ztrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSnrm2_v2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2_v2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasScnrm2_v2", ("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDznrm2_v2", ("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDotEx", ("rocblas_dotex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDotcEx", ("rocblas_dotcex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSdot_v2", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)),
("cublasDdot_v2", ("rocblas_ddot", CONV_MATH_FUNC, API_BLAS)),
("cublasCdotu_v2", ("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdotc_v2", ("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdotu_v2", ("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdotc_v2", ("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScalEx", ("rocblas_scalex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSscal_v2", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)),
("cublasDscal_v2", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)),
("cublasCscal_v2", ("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsscal_v2", ("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZscal_v2", ("rocblas_zcsal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdscal_v2", ("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasAxpyEx", ("rocblas_axpyex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy_v2", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasDaxpy_v2", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasCaxpy_v2", ("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZaxpy_v2", ("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScopy_v2", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)),
("cublasDcopy_v2", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)),
("cublasCcopy_v2", ("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZcopy_v2", ("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSswap_v2", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap_v2", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)),
("cublasCswap_v2", ("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZswap_v2", ("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamax_v2", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax_v2", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamax_v2", ("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamax_v2", ("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamin_v2", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin_v2", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamin_v2", ("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamin_v2", ("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSasum_v2", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)),
("cublasDasum_v2", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)),
("cublasScasum_v2", ("rocblas_scasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDzasum_v2", ("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrot_v2", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot_v2", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot_v2", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsrot_v2", ("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrot_v2", ("rocblas_zrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdrot_v2", ("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotg_v2", ("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotg_v2", ("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrotg_v2", ("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrotg_v2", ("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotm_v2", ("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotm_v2", ("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotmg_v2", ("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotmg_v2", ("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("CURAND_STATUS_SUCCESS", ("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_VERSION_MISMATCH", ("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_NOT_INITIALIZED", ("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_ALLOCATION_FAILED", ("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_TYPE_ERROR", ("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_OUT_OF_RANGE", ("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_LENGTH_NOT_MULTIPLE", ("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_DOUBLE_PRECISION_REQUIRED", ("HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_LAUNCH_FAILURE", ("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_PREEXISTING_FAILURE", ("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_INITIALIZATION_FAILED", ("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_ARCH_MISMATCH", ("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_STATUS_INTERNAL_ERROR", ("HIPRAND_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)),
("mtgp32dc_params_fast_11213", ("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_DEFAULT", ("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_XORWOW", ("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_MRG32K3A", ("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_MTGP32", ("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_MT19937", ("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_PSEUDO_PHILOX4_32_10", ("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_QUASI_DEFAULT", ("HIPRAND_RNG_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_QUASI_SOBOL32", ("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_QUASI_SCRAMBLED_SOBOL32", ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_QUASI_SOBOL64", ("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND)),
("CURAND_RNG_QUASI_SCRAMBLED_SOBOL64", ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND)),
("curand_ORDERING_PSEUDO_BEST", ("HIPRAND_ORDERING_PSEUDO_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_ORDERING_PSEUDO_DEFAULT", ("HIPRAND_ORDERING_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_ORDERING_PSEUDO_SEEDED", ("HIPRAND_ORDERING_PSEUDO_SEEDED", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_ORDERING_QUASI_DEFAULT", ("HIPRAND_ORDERING_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_DIRECTION_VECTORS_32_JOEKUO6", ("HIPRAND_DIRECTION_VECTORS_32_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", ("HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_DIRECTION_VECTORS_64_JOEKUO6", ("HIPRAND_DIRECTION_VECTORS_64_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", ("HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_CHOOSE_BEST", ("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_ITR", ("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_KNUTH", ("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_HITR", ("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_BINARY_SEARCH", ("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_DISCRETE_GAUSS", ("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_REJECTION", ("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_DEVICE_API", ("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_FAST_REJECTION", ("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_3RD", ("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_DEFINITION", ("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_POISSON", ("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)),
("curandCreateGeneratorHost", ("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND)),
("curandCreatePoissonDistribution", ("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND)),
("curandDestroyDistribution", ("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND)),
("curandDestroyGenerator", ("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND)),
("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)),
("curandGenerateLogNormal", ("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND)),
("curandGenerateLogNormalDouble", ("hiprandGenerateLogNormalDouble", CONV_MATH_FUNC, API_RAND)),
("curandGenerateLongLong", ("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)),
("curandGenerateNormalDouble", ("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND)),
("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)),
("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)),
("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)),
("curandGenerateUniformDouble", ("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND)),
("curandGetDirectionVectors32", ("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGetDirectionVectors64", ("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGetProperty", ("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGetScrambleConstants32", ("hiprandGetScrambleConstants32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGetScrambleConstants64", ("hiprandGetScrambleConstants64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)),
("curandSetGeneratorOffset", ("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND)),
("curandSetGeneratorOrdering", ("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curandSetPseudoRandomGeneratorSeed", ("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND)),
("curandSetQuasiRandomGeneratorDimensions", ("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND)),
("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)),
("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)),
("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal_double", ("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal2_double", ("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal4_double", ("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_mtgp32_single", ("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curand_mtgp32_single_specific", ("hiprand_mtgp32_single_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curand_mtgp32_specific", ("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED)),
("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)),
("curandMakeMTGP32Constants", ("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND)),
("curandMakeMTGP32KernelState", ("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal2_double", ("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal4_double", ("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)),
("curand_uniform_double", ("hiprand_uniform_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_uniform2_double", ("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)),
("curand_uniform4_double", ("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)),
("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)),
("curand_Philox4x32_10", ("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED)),
("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)),
("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUFFT_COMPATIBILITY_DEFAULT", ("HIPFFT_COMPATIBILITY_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED)),
("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)),
("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)),
("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_VALUE", ("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INTERNAL_ERROR", ("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_UNALIGNED_DATA", ("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INCOMPLETE_PARAMETER_LIST", ("HIPFFT_INCOMPLETE_PARAMETER_LIST", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_DEVICE", ("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_NOT_IMPLEMENTED", ("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_LICENSE_ERROR", ("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED)),
("CUFFT_NOT_SUPPORTED", ("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT)),
("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)),
("cufftType", ("hipfftType", CONV_TYPE, API_FFT)),
("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)),
("cufftCompatibility_t", ("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED)),
("cufftCompatibility", ("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED)),
("CUFFT_COMPATIBILITY_FFTW_PADDING", ("HIPFFT_COMPATIBILITY_FFTW_PADDING", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED)),
("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)),
("cufftDoubleReal", ("hipfftDoubleReal", CONV_TYPE, API_FFT)),
("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)),
("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)),
("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)),
("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)),
("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize", ("hipfftGetSize", CONV_MATH_FUNC, API_FFT)),
("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)),
("cufftSetAutoAllocation", ("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT)),
("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)),
("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)),
("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)),
("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)),
("cufftGetProperty", ("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED)),
])
CUDA_SPARSE_MAP = collections.OrderedDict([
("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPARSE)),
("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPARSE)),
("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE, API_SPARSE)),
("cusparseCreateMatDescr", ("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreate", ("hipsparseCreate", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroyMatDescr", ("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcsrsort_bufferSizeExt", ("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcoosort_bufferSizeExt", ("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcoosortByRow", ("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateIdentityPermutation", ("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSetMatIndexBase", ("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPARSE)),
("CUSPARSE_STATUS_SUCCESS", ("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_NOT_INITIALIZED", ("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_ALLOC_FAILED", ("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_INVALID_VALUE", ("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_MAPPING_ERROR", ("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_EXECUTION_FAILED", ("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_INTERNAL_ERROR", ("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", ("HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_ARCH_MISMATCH", ("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_STATUS_ZERO_PIVOT", ("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_OPERATION_TRANSPOSE", ("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_OPERATION_NON_TRANSPOSE", ("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE", ("HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_INDEX_BASE_ZERO", ("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_INDEX_BASE_ONE", ("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_MATRIX_TYPE_GENERAL", ("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPARSE)),
])
GLOO_SPECIFIC_MAPPINGS = collections.OrderedDict([
("cuda_stream" , ("hip_stream", API_GLOO)),
("REGISTER_CUDA_OPERATOR" , ("REGISTER_HIP_OPERATOR", API_GLOO)),
("CUDA_1D_KERNEL_LOOP" , ("HIP_1D_KERNEL_LOOP", API_GLOO)),
("CUDAContext" , ("HIPContext", API_GLOO)),
("CAFFE_CUDA_NUM_THREADS" , ("CAFFE_HIP_NUM_THREADS", API_GLOO)),
("HasCudaGPU" , ("HasHipGPU", API_GLOO)),
("__expf" , ("expf", API_GLOO)),
("CUBLAS_ENFORCE" , ("ROCBLAS_ENFORCE", API_GLOO)),
("CUBLAS_CHECK" , ("ROCBLAS_CHECK", API_GLOO)),
("cublas_handle" , ("rocblashandle", API_GLOO)),
("CURAND_ENFORCE" ,("HIPRAND_ENFORCE", API_GLOO)),
("CURAND_CHECK" ,("HIPRAND_CHECK", API_GLOO)),
("curandGenerateUniform" , ("hiprandGenerateUniform", API_GLOO)),
("curand_generator" , ("hiprand_generator", API_GLOO)),
("CaffeCudaGetDevice" , ("CaffeHipGetDevice", API_GLOO)),
("USE_CUDA" ,("USE_ROCM", API_GLOO)),
("CUDA" ,("HIP", API_GLOO)),
("Cuda" ,("Hip", API_GLOO)),
("cuda_" ,("hip_", API_GLOO)),
("_cuda" ,("_hip", API_GLOO)),
("cuda::" ,("hip::", API_GLOO)),
("CUDNN" ,("MIOPEN", API_GLOO)),
("CuDNN" ,("MIOPEN", API_GLOO)),
("cudnn" ,("miopen", API_GLOO)),
("namespace cuda", ("namespace hip", API_GLOO)),
("gloo/cuda.h", ("gloo/hip.h", API_GLOO)),
("<nccl.h>", ("<rccl.h>", API_GLOO)),
("GLOO_USE_NCCL", ("GLOO_USE_RCCL", API_GLOO)),
])
# NB: C10 mappings are more specific than Caffe2 mappings, so run them
# first
CUDA_TO_HIP_MAPPINGS = [CUDA_IDENTIFIER_MAP, CUDA_TYPE_NAME_MAP,
CUDA_INCLUDE_MAP, CUDA_SPARSE_MAP, GLOO_SPECIFIC_MAPPINGS]
|
""" Constants for annotations in the mapping.
The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py.
They are based on
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h
and fall into three categories: 1) the type of mapping, 2) the API the mapping belongs to,
and 3) whether the mapping is still unsupported in HIP.
"""
CONV_VERSION = 0
CONV_INIT = 1
CONV_DEVICE = 2
CONV_MEM = 3
CONV_KERN = 4
CONV_COORD_FUNC = 5
CONV_MATH_FUNC = 6
CONV_DEVICE_FUNC = 7
CONV_SPECIAL_FUNC = 8
CONV_STREAM = 9
CONV_EVENT = 10
CONV_OCCUPANCY = 11
CONV_CONTEXT = 12
CONV_PEER = 13
CONV_MODULE = 14
CONV_CACHE = 15
CONV_EXEC = 16
CONV_ERROR = 17
CONV_DEF = 18
CONV_TEX = 19
CONV_GL = 20
CONV_GRAPHICS = 21
CONV_SURFACE = 22
CONV_JIT = 23
CONV_D3D9 = 24
CONV_D3D10 = 25
CONV_D3D11 = 26
CONV_VDPAU = 27
CONV_EGL = 28
CONV_THREAD = 29
CONV_OTHER = 30
CONV_INCLUDE = 31
CONV_INCLUDE_CUDA_MAIN_H = 32
CONV_TYPE = 33
CONV_LITERAL = 34
CONV_NUMERIC_LITERAL = 35
CONV_LAST = 36
API_DRIVER = 37
API_RUNTIME = 38
API_BLAS = 39
API_SPARSE = 40
API_RAND = 41
API_LAST = 42
API_FFT = 43
HIP_UNSUPPORTED = 43
API_GLOO = 1340
|
#!/usr/bin/env python
""" The Python Hipify script.
##
# Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
# 2017-2018 Advanced Micro Devices, Inc. and
# Facebook Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function
import fnmatch
import re
import shutil
import sys
import os
import json
from pyHIPIFY.cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
class InputError(Exception):
# Exception raised for errors in the input.
def __init__(self, message):
super(InputError, self).__init__(message)
self.message = message
def __str__(self):
return "{}: {}".format("Input error", self.message)
def matched_files_iter(root_path, includes=('*',), ignores=(), extensions=()):
def _fnmatch(filepath, patterns):
return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns)
def match_extensions(filename):
"""Helper method to see if filename ends with certain extension"""
return any(filename.endswith(e) for e in extensions)
exact_matches = set(includes)
# This is a very rough heuristic; really, we want to avoid scanning
# any file which is not checked into source control, but this script
# needs to work even if you're in a Git or Hg checkout, so easier to
# just blacklist the biggest time sinks that won't matter in the
# end.
for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True):
rel_dirpath = os.path.relpath(abs_dirpath, root_path)
if rel_dirpath == '.':
# Prune these directories at the top level only; os.walk would otherwise descend into them and waste time scanning files we never hipify.
if ".git" in dirs:
dirs.remove(".git")
if "build" in dirs:
dirs.remove("build")
if "third_party" in dirs:
dirs.remove("third_party")
for filename in filenames:
filepath = os.path.join(rel_dirpath, filename)
# We respect extensions, UNLESS you wrote the entire
# filename verbatim, in which case we always accept it
if _fnmatch(filepath, includes) and \
(not _fnmatch(filepath, ignores)) and \
(match_extensions(filepath) or filepath in exact_matches):
yield filepath
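# Rough usage sketch (hypothetical arguments): walk the current checkout and yield
# every .cu/.cuh file, skipping the pruned ".git", "build" and "third_party" trees:
#   for path in matched_files_iter(".", includes=("*",), extensions=(".cu", ".cuh")):
#       print(path)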
def preprocess(
project_directory,
output_directory,
all_files,
show_progress=True):
"""
Call preprocessor on selected files.
"""
for filepath in all_files:
preprocessor(project_directory, output_directory, filepath)
if show_progress:
print(
filepath, "->",
get_hip_file_path(filepath))
print("Successfully preprocessed all matching files.")
def add_dim3(kernel_string, cuda_kernel):
    '''Wraps the grid and block launch-config arguments in dim3(); they become the second and third arguments of the resulting hipLaunchKernelGGL call.'''
count = 0
closure = 0
kernel_string = kernel_string.replace("<<<", "").replace(">>>", "")
arg_locs = [{} for _ in range(2)]
arg_locs[count]['start'] = 0
for ind, c in enumerate(kernel_string):
if count > 1:
break
if c == "(":
closure += 1
elif c == ")":
closure -= 1
elif (c == "," or ind == len(kernel_string) - 1) and closure == 0:
arg_locs[count]['end'] = ind + (c != ",")
count += 1
if count < 2:
arg_locs[count]['start'] = ind + 1
first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1]
second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']]
first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ")
second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ")
first_arg_dim3 = "dim3({})".format(first_arg_clean)
second_arg_dim3 = "dim3({})".format(second_arg_clean)
first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3)
second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3)
cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3)
return cuda_kernel
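# Example of the transformation (hypothetical kernel and launch arguments):
#   kernel_string = "<<<grid, block, 0, stream>>>"
#   cuda_kernel   = "my_kernel<<<grid, block, 0, stream>>>("
#   add_dim3(kernel_string, cuda_kernel)
#   -> "my_kernel<<<dim3(grid), dim3(block), 0, stream>>>("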
RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+')
def processKernelLaunches(string):
""" Replace the CUDA style Kernel launches with the HIP style kernel launches."""
# Concat the namespace with the kernel names. (Find cleaner way of doing this later).
string = RE_KERNEL_LAUNCH.sub(lambda inp: "{0}{1}::".format(inp.group(1), inp.group(2)), string)
def grab_method_and_template(in_kernel):
# The positions for relevant kernel components.
pos = {
"kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]},
"kernel_name": {"start": -1, "end": -1},
"template": {"start": -1, "end": -1}
}
# Count for balancing template
count = {"<>": 0}
# Status for whether we are parsing a certain item.
START = 0
AT_TEMPLATE = 1
AFTER_TEMPLATE = 2
AT_KERNEL_NAME = 3
status = START
# Parse the string character by character
for i in range(pos["kernel_launch"]["start"] - 1, -1, -1):
char = string[i]
# Handle Templating Arguments
if status == START or status == AT_TEMPLATE:
if char == ">":
if status == START:
status = AT_TEMPLATE
pos["template"]["end"] = i
count["<>"] += 1
if char == "<":
count["<>"] -= 1
if count["<>"] == 0 and (status == AT_TEMPLATE):
pos["template"]["start"] = i
status = AFTER_TEMPLATE
# Handle Kernel Name
if status != AT_TEMPLATE:
if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}:
if status != AT_KERNEL_NAME:
status = AT_KERNEL_NAME
pos["kernel_name"]["end"] = i
# Case: Kernel name starts the string.
if i == 0:
pos["kernel_name"]["start"] = 0
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
else:
# Potential ending point if we're already traversing a kernel's name.
if status == AT_KERNEL_NAME:
pos["kernel_name"]["start"] = i
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
def find_kernel_bounds(string):
"""Finds the starting and ending points for all kernel launches in the string."""
kernel_end = 0
kernel_positions = []
# Continue until no more kernel launches are found.
while string.find("<<<", kernel_end) != -1:
# Get kernel starting position (starting from the previous ending point)
kernel_start = string.find("<<<", kernel_end)
# Get kernel ending position (adjust end point past the >>>)
kernel_end = string.find(">>>", kernel_start) + 3
if kernel_end <= 0:
raise InputError("no kernel end found")
# Add to list of traversed kernels
kernel_positions.append({"start": kernel_start, "end": kernel_end,
"group": string[kernel_start: kernel_end]})
return kernel_positions
# Grab positional ranges of all kernel launches
get_kernel_positions = [k for k in find_kernel_bounds(string)]
output_string = string
# Replace each CUDA kernel with a HIP kernel.
for kernel in get_kernel_positions:
# Get kernel components
params = grab_method_and_template(kernel)
# Find parenthesis after kernel launch
parenthesis = string.find("(", kernel["end"])
# Extract cuda kernel
cuda_kernel = string[params[0]["start"]:parenthesis + 1]
kernel_string = string[kernel['start']:kernel['end']]
cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel)
# Keep the number of kernel launch params consistent (grid dims, block dims, dynamic shared size, stream)
num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")")))
hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace(
">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace(">>>", ", ")
# Replace cuda kernel with hip kernel
output_string = output_string.replace(cuda_kernel, hip_kernel)
return output_string
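# End-to-end example of the launch rewrite performed above (hypothetical kernel):
#   input : "my_kernel<<<grid, block>>>(arg);"
#   output: "hipLaunchKernelGGL(my_kernel, dim3(grid), dim3(block), 0, 0, arg);"
# The appended ", 0" entries pad the launch configuration out to the four slots
# hipLaunchKernelGGL expects when the original <<<...>>> omitted some of them.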
def get_hip_file_path(filepath):
"""
Returns the new name of the hipified file
"""
dirpath, filename = os.path.split(filepath)
root, ext = os.path.splitext(filename)
# Concretely, we do the following:
#
# - If there is a directory component named "cuda", replace
# it with "hip", AND
#
# - If the file name contains "CUDA", replace it with "HIP", AND
# Furthermore, ALWAYS replace '.cu' with '.hip', because those files
# contain CUDA kernels that need to be hipified and compiled with the
# hcc compiler.
#
# This isn't set in stone; we might adjust this to support other
# naming conventions.
if ext == '.cu':
ext = '.hip'
orig_dirpath = dirpath
dirpath = dirpath.replace('cuda', 'hip')
root = root.replace('cuda', 'hip')
root = root.replace('CUDA', 'HIP')
return os.path.join(dirpath, root + ext)
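# Examples of the renaming rule (hypothetical paths):
#   "caffe2/operators/my_cuda_op.cu"  -> "caffe2/operators/my_hip_op.hip"
#   "src/CUDAFoo.h"                   -> "src/HIPFoo.h"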
# Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508
class Trie():
"""Regex::Trie in Python. Creates a Trie out of a list of words. The trie can be exported to a Regex pattern.
The corresponding Regex should match much faster than a simple Regex union."""
def __init__(self):
self.data = {}
def add(self, word):
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[''] = 1
def dump(self):
return self.data
def quote(self, char):
return re.escape(char)
def _pattern(self, pData):
data = pData
if "" in data and len(data.keys()) == 1:
return None
alt = []
cc = []
q = 0
for char in sorted(data.keys()):
if isinstance(data[char], dict):
try:
recurse = self._pattern(data[char])
alt.append(self.quote(char) + recurse)
except Exception:
cc.append(self.quote(char))
else:
q = 1
cconly = not len(alt) > 0
if len(cc) > 0:
if len(cc) == 1:
alt.append(cc[0])
else:
alt.append('[' + ''.join(cc) + ']')
if len(alt) == 1:
result = alt[0]
else:
result = "(?:" + "|".join(alt) + ")"
if q:
if cconly:
result += "?"
else:
result = "(?:%s)?" % result
return result
def pattern(self):
return self._pattern(self.dump())
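# Small illustration (hypothetical words): adding "cudaMalloc" and "cudaMemcpy"
# to a Trie yields the pattern "cudaM(?:alloc|emcpy)", which matches either word
# while sharing the common prefix instead of a plain "cudaMalloc|cudaMemcpy" union.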
RE_TRIE = Trie()
RE_MAP = {}
for mapping in CUDA_TO_HIP_MAPPINGS:
for src, value in mapping.items():
dst = value[0]
RE_TRIE.add(src)
RE_MAP[src] = dst
RE_PREPROCESSOR = re.compile(RE_TRIE.pattern())
def re_replace(input_string):
def sub_repl(m):
return RE_MAP[m.group(0)]
return RE_PREPROCESSOR.sub(sub_repl, input_string)
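# Sketch of the substitution step, using mappings listed further above
# (cufftExecC2C -> hipfftExecC2C, CUFFT_FORWARD -> HIPFFT_FORWARD); the call
# itself is hypothetical source text:
#   re_replace("cufftExecC2C(plan, idata, odata, CUFFT_FORWARD);")
#   -> "hipfftExecC2C(plan, idata, odata, HIPFFT_FORWARD);"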
def preprocessor(project_directory, output_directory, filepath):
""" Executes the CUDA -> HIP conversion on the specified file. """
fin_path = os.path.join(project_directory, filepath)
with open(fin_path, 'r') as fin:
output_source = fin.read()
fout_path = os.path.join(output_directory, get_hip_file_path(filepath))
assert(os.path.join(output_directory, fout_path) != os.path.join(project_directory, fin_path))
if not os.path.exists(os.path.dirname(fout_path)):
os.makedirs(os.path.dirname(fout_path))
with open(fout_path, 'w') as fout:
output_source = re_replace(output_source)
# Perform Kernel Launch Replacements
output_source = processKernelLaunches(output_source)
fout.write(output_source)
def extract_arguments(start, string):
""" Return the list of arguments in the upcoming function parameter closure.
Example:
string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
arguments (output):
'[{'start': 1, 'end': 7},
{'start': 8, 'end': 16},
{'start': 17, 'end': 19},
{'start': 20, 'end': 53}]'
"""
arguments = []
closures = {
"<": 0,
"(": 0
}
current_position = start
argument_start_pos = current_position + 1
# Search for final parenthesis
while current_position < len(string):
if string[current_position] == "(":
closures["("] += 1
elif string[current_position] == ")":
closures["("] -= 1
elif string[current_position] == "<":
closures["<"] += 1
elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
closures["<"] -= 1
# Finished all arguments
if closures["("] == 0 and closures["<"] == 0:
# Add final argument
arguments.append({"start": argument_start_pos, "end": current_position})
break
# Finished current argument
if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
arguments.append({"start": argument_start_pos, "end": current_position})
argument_start_pos = current_position + 1
current_position += 1
return arguments
def hipify(
project_directory,
extensions=(".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"),
output_directory=None,
includes=(),
ignores=(),
list_files_only=False,
show_progress=True,
):
assert os.path.exists(project_directory)
# If no output directory, provide a default one.
if not output_directory:
output_directory = os.path.join(project_directory, "hip")
all_files = list(matched_files_iter(project_directory, includes=includes,
ignores=ignores, extensions=extensions))
if list_files_only:
print(os.linesep.join(all_files))
return
# Start Preprocessor
preprocess(
project_directory,
output_directory,
all_files,
show_progress=show_progress)
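# Typical invocation (hypothetical project path); hipified sources are written
# under <project>/hip unless output_directory is given:
#   hipify("/path/to/my_project", includes=("*",), show_progress=False)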
|
# -*- coding: utf-8 -*-
import unittest
from collections import namedtuple
from flake8_coding import CodingChecker
from mock import patch
Options = namedtuple('Options', 'no_accept_encodings, accept_encodings')
class TestFlake8Coding(unittest.TestCase):
def test_has_utf8_coding_header(self):
checker = CodingChecker(None, 'testsuite/utf8.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_file_not_found(self):
checker = CodingChecker(None, 'file_not_found')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_empty_file(self):
checker = CodingChecker(None, 'testsuite/empty.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_has_latin1_coding_header(self):
checker = CodingChecker(None, 'testsuite/latin1.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_has_coding_header_at_2nd_line(self):
checker = CodingChecker(None, 'testsuite/2nd-line.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_has_coding_header_at_3rd_line(self):
checker = CodingChecker(None, 'testsuite/3rd-line.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][0], 1)
self.assertEqual(ret[0][1], 0)
self.assertTrue(ret[0][2].startswith('C101 '))
def test_has_vim_styled_coding_header(self):
checker = CodingChecker(None, 'testsuite/vim-style.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(ret, [])
def test_has_coding_header_with_invalid_encoding_name(self):
checker = CodingChecker(None, 'testsuite/invalid.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][0], 2)
self.assertEqual(ret[0][1], 0)
self.assertTrue(ret[0][2].startswith('C102 '))
def test_has_no_coding_headers(self):
checker = CodingChecker(None, 'testsuite/nocodings.py')
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][0], 1)
self.assertEqual(ret[0][1], 0)
self.assertTrue(ret[0][2].startswith('C101 '))
def test_default_encoding(self):
try:
options = Options(False, 'latin-1, utf-8')
CodingChecker.parse_options(options)
self.assertEqual(CodingChecker.encodings, ['latin-1', 'utf-8'])
finally:
if hasattr(CodingChecker, 'encodings'):
del CodingChecker.encodings
def test_change_encoding(self):
try:
options = Options(False, 'utf-8,utf-16')
CodingChecker.parse_options(options)
self.assertEqual(CodingChecker.encodings, ['utf-8', 'utf-16'])
finally:
if hasattr(CodingChecker, 'encodings'):
del CodingChecker.encodings
def test_stdin(self):
try:
from flake8.engine import pep8 as stdin_utils # noqa
target = 'flake8.engine.pep8.stdin_get_value'
except ImportError:
from flake8 import utils as stdin_utils # noqa
target = 'flake8.utils.stdin_get_value'
with patch(target) as stdin_get_value:
with open('testsuite/nocodings.py') as fp:
stdin_get_value.return_value = fp.read()
for input in ['stdin', '-', None]:
checker = CodingChecker(None, input)
checker.encodings = ['latin-1', 'utf-8']
ret = list(checker.run())
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][0], 1)
self.assertEqual(ret[0][1], 0)
self.assertTrue(ret[0][2].startswith('C101 '))
def test_no_accept_encodings_sets_encodings_none(self):
try:
options = Options(True, 'latin-1,utf-8')
CodingChecker.parse_options(options)
self.assertTrue(CodingChecker.encodings is None)
finally:
if hasattr(CodingChecker, 'encodings'):
del CodingChecker.encodings
def test_encoding_none_with_no_coding_comment(self):
checker = CodingChecker(None, 'testsuite/nocoding.py')
checker.encodings = None
ret = list(checker.run())
self.assertEqual(ret, [])
def test_encoding_none_with_coding_comment(self):
checker = CodingChecker(None, 'testsuite/utf8.py')
checker.encodings = None
ret = list(checker.run())
self.assertEqual(len(ret), 1)
self.assertEqual(ret[0][0], 1)
self.assertEqual(ret[0][1], 0)
self.assertTrue(ret[0][2].startswith('C103 '))
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import re
from setuptools import setup
def get_version(filename):
"""
Return package version as listed in `__version__` in `filename`.
"""
init_py = open(filename).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('flake8_coding.py')
with open('README.rst') as readme_file:
readme = readme_file.read()
setup(
name='flake8-coding',
version=version,
description='Adds coding magic comment checks to flake8',
long_description=readme,
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
],
author='Takeshi KOMIYA',
author_email='[email protected]',
url='https://github.com/tk0miya/flake8-coding',
license='Apache License 2.0',
keywords='pep8 flake8 coding',
py_modules=['flake8_coding'],
install_requires=[
'flake8',
],
entry_points={
'flake8.extension': ['C10 = flake8_coding:CodingChecker'],
},
)
|
# -*- coding: utf-8 -*-
import re
__version__ = '1.3.3'
class CodingChecker(object):
name = 'flake8_coding'
version = __version__
def __init__(self, tree, filename):
self.filename = filename
@classmethod
def add_options(cls, parser):
parser.add_option(
'--accept-encodings', default='latin-1, utf-8', action='store',
help="Acceptable source code encodings for `coding:` magic comment"
)
parser.add_option(
'--no-accept-encodings', action='store_true', parse_from_config=True,
help="Warn for files containing a `coding:` magic comment"
)
parser.add_option(
'--optional-ascii-coding', action='store_true', parse_from_config=True,
help="Do not force 'coding:' header on ascii only files"
)
if hasattr(parser, 'config_options'): # for flake8 < 3.0
parser.config_options.append('accept-encodings')
parser.config_options.append('no-accept-encodings')
parser.config_options.append('optional-ascii-coding')
@classmethod
def parse_options(cls, options):
if options.no_accept_encodings:
cls.encodings = None
else:
cls.encodings = [e.strip().lower() for e in options.accept_encodings.split(',')]
cls.optional_ascii_coding = options.optional_ascii_coding
@classmethod
def has_non_ascii_characters(cls, lines):
return any(any(ord(c) > 127 for c in line) for line in lines)
def read_lines(self):
if self.filename in ('stdin', '-', None):
try:
# flake8 >= v3.0
from flake8.engine import pep8 as stdin_utils
except ImportError:
from flake8 import utils as stdin_utils
return stdin_utils.stdin_get_value().splitlines(True)
else:
try:
import pycodestyle
except ImportError:
import pep8 as pycodestyle
return pycodestyle.readlines(self.filename)
def run(self):
try:
# PEP 263 says a magic comment must be placed in the source file
# as either the first or second line of the file
lines = self.read_lines()
if len(lines) == 0:
return
for lineno, line in enumerate(lines[:2], start=1):
matched = re.search(r'coding[:=]\s*([-\w.]+)', line, re.IGNORECASE)
if matched:
if self.encodings:
if matched.group(1).lower() not in self.encodings:
yield lineno, 0, "C102 Unknown encoding found in coding magic comment", type(self)
else:
yield lineno, 0, "C103 Coding magic comment present", type(self)
break
else:
if self.encodings:
if not self.optional_ascii_coding or self.has_non_ascii_characters(lines):
yield 1, 0, "C101 Coding magic comment not found", type(self)
except IOError:
pass
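# Examples of coding magic comments accepted by the regex above:
#   # -*- coding: utf-8 -*-
#   # vim: set fileencoding=utf-8 :
# Whether the named encoding itself is acceptable is controlled by --accept-encodings.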
|
# -*- coding: latin-1 -*-
|
#!/usr/bin/python
#
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
#!/usr/bin/python
# -*- coding: utf -*-
|
#!/usr/bin/python
#
|
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
import json
from hipify import hipify_python
def main():
parser = argparse.ArgumentParser(
description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--project-directory',
type=str,
help="The root of the project. (default: %(default)s)")
parser.add_argument(
'--output-directory',
type=str,
default=None,
help="The Directory to Store the Hipified Project",
required=False)
parser.add_argument(
'--list-files-only',
action='store_true',
help="Only print the list of hipify files.")
parser.add_argument(
'--header-include-dirs',
default=[],
help="Directories to add to search path for header includes",
required=False)
parser.add_argument(
'--includes',
default=['*'],
help="Source files to be included for hipify",
required=False)
parser.add_argument(
'--ignores',
default=[],
help="Source files to be excluded for hipify",
required=False)
parser.add_argument(
'--dump-dict-file',
type=str,
help="The file to Store the return dict output after hipification",
required=False)
parser.add_argument(
'--config-json',
type=str,
help="relative path of hipify config json which contains arguments to hipify",
required=False)
args = parser.parse_args()
if(args.config_json):
if(os.path.exists(args.config_json)):
with open(args.config_json) as jsonf:
json_args = json.load(jsonf)
if(json_args.get('project_directory') is not None):
project_directory = os.path.join(os.path.dirname(args.config_json), json_args['project_directory'])
else:
raise ValueError('relative path to project_dir to config_json should be mentioned')
if(json_args.get('output_directory') is not None):
output_directory = os.path.join(os.path.dirname(args.config_json), json_args['output_directory'])
else:
output_directory = project_directory
if(json_args.get('includes') is not None):
includes = json_args['includes']
else:
includes = ['*']
if(json_args.get('header_include_dirs') is not None):
header_include_dirs = json_args['header_include_dirs']
else:
header_include_dirs = []
if(json_args.get('ignores') is not None):
ignores = json_args['ignores']
else:
ignores = []
else:
raise ValueError('config json file specified should be a valid file path')
else:
if args.project_directory is not None:
project_directory=args.project_directory;
else:
            raise ValueError('If not using a config JSON, project_directory should be specified on the command line')
if args.output_directory:
output_directory = args.output_directory
else:
output_directory = args.project_directory
includes=args.includes
ignores=args.ignores
header_include_dirs=args.header_include_dirs
dump_dict_file = args.dump_dict_file
print("project_directory :",project_directory , " output_directory: ", output_directory, " includes: ", includes, " ignores: ", ignores, " header_include_dirs: ", header_include_dirs)
HipifyFinalResult = hipify_python.hipify(
project_directory=project_directory,
output_directory=output_directory,
includes=includes,
ignores=ignores,
header_include_dirs=header_include_dirs if isinstance(header_include_dirs, list) \
else header_include_dirs.strip("[]").split(";"),
is_pytorch_extension=True,
show_detailed=True)
if dump_dict_file:
with open(dump_dict_file, 'w') as dict_file:
dict_file.write(json.dumps(HipifyFinalResult))
else:
raise ValueError('dump_dict_file must be specified')
if __name__ == "__main__":
main()
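# Illustrative usage sketch (the script file name below is an assumption, not
# given in this file): a typical invocation could look roughly like
#
#     python hipify_cli.py \
#         --project-directory ./my_extension \
#         --output-directory ./my_extension_hip \
#         --dump-dict-file hipify_output_dict.json
#
# which hipifies the sources under ./my_extension into ./my_extension_hip and
# writes the mapping of original to hipified paths to hipify_output_dict.json.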
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
import json
def main():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'--io-file',
type=str,
help="Input file containing list of files which will be overwritten by hipified file names",
required=True)
parser.add_argument(
'--dump-dict-file',
type=str,
help="Input file where the dictionary output of hipify is stored",
required=True)
args = parser.parse_args()
with open(args.dump_dict_file, mode='r') as file_obj:
json_string = file_obj.read()
hipified_result = json.loads(json_string)
out_list = []
with open(args.io_file) as inp_file:
for line in inp_file:
line = line.strip()
if line in hipified_result:
out_list.append(hipified_result[line]['hipified_path'])
else:
out_list.append(line)
w_file_obj = open(args.io_file, mode='w')
for f in out_list:
w_file_obj.write(f+"\n")
w_file_obj.close()
if __name__ == "__main__":
main()
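# Illustrative example (hypothetical paths): if the dump dict written by the
# hipify step contains an entry like
#     {"/src/foo.cu": {"hipified_path": "/src/foo.hip"}}   # other keys omitted
# and the --io-file lists "/src/foo.cu", this script rewrites that line to
# "/src/foo.hip"; lines with no entry in the dict are written back unchanged.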
|
__version__ = '1.0.0'
|
import collections
from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT,
API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME,
API_SPARSE, CONV_CACHE, CONV_CONTEXT, CONV_D3D9,
CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE,
CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT,
CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE,
CONV_INCLUDE_CUDA_MAIN_H, CONV_INIT, CONV_JIT,
CONV_MATH_FUNC, CONV_MEM, CONV_MODULE,
CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER,
CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM,
CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE,
CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED)
""" Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents
This closely follows the implementation in hipify-clang
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp
and its structure.
There are different maps for fundamental names, include files, identifiers, sparse, and
PyTorch specific translations.
Each of the entries in these maps translates a CUDA string to a tuple containing the
ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is not
supported in ROCm/HIP yet.
"""
# List of math functions that should be replaced inside device code only.
MATH_TRANSPILATIONS = collections.OrderedDict(
[
("std::max", ("::max")),
("std::min", ("::min")),
("std::ceil", ("::ceil")),
("std::floor", ("::floor")),
("std::exp", ("::exp")),
("std::log", ("::log")),
("std::pow", ("::pow")),
("std::fabs", ("::fabs")),
("std::fmod", ("::fmod")),
("std::remainder", ("::remainder")),
("std::frexp", ("::frexp")),
]
)
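# Illustrative sketch (hypothetical helper, not part of this module): the keys
# and values above pair up as plain string substitutions, e.g.
#
#     def transpile_math(line):
#         for cuda_name, hip_name in MATH_TRANSPILATIONS.items():
#             line = line.replace(cuda_name, hip_name)
#         return line
#
#     transpile_math("auto m = std::max(a, b);")  # -> "auto m = ::max(a, b);"
#
# The real hipify pass restricts such substitutions to device code, as the
# comment above notes; this sketch only shows how the table is meant to be read.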
CUDA_TYPE_NAME_MAP = collections.OrderedDict(
[
("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)),
("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ARRAY3D_DESCRIPTOR",
("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUDA_MEMCPY3D_PEER",
("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_POINTER_ATTRIBUTE_P2P_TOKENS",
(
"HIP_POINTER_ATTRIBUTE_P2P_TOKENS",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CUDA_RESOURCE_DESC",
("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_RESOURCE_VIEW_DESC",
("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUipcEventHandle",
("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUarray_cubemap_face",
("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUarray_format", ("hipArray_format", CONV_TYPE, API_DRIVER)),
("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUmem_range_attribute",
("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)),
("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)),
("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)),
("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)),
(
"CUgraphicsResource",
("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUmipmappedArray",
("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUfunction_attribute",
("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUfunction_attribute_enum",
("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsMapResourceFlags",
("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsMapResourceFlags_enum",
("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsRegisterFlags",
("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsRegisterFlags_enum",
("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUoccupancy_flags",
("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUoccupancy_flags_enum",
("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUipcMem_flags_enum",
("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjit_cacheMode_enum",
("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjit_fallback_enum",
("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjitInputType_enum",
("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
(
"CUmemAttach_flags",
("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUmemAttach_flags_enum",
("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUresourcetype_enum",
("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)),
("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)),
("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)),
("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)),
("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)),
("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUsurfref",
("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)),
("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)),
("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)),
(
"CUstreamWaitValue_flags",
("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUstreamWriteValue_flags",
("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUstreamBatchMemOpType",
("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUdevice_P2PAttribute",
("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)),
("CUevent_st", ("ihipEvent_t", CONV_TYPE, API_DRIVER)),
("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)),
("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)),
("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUd3d9DeviceList",
("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d9map_flags",
("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d9register_flags",
("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10DeviceList",
("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10map_flags",
("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10register_flags",
("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d11DeviceList",
("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUeglStreamConnection_st",
("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUeglStreamConnection",
("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"libraryPropertyType_t",
("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"libraryPropertyType",
("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)),
("cudaArray", ("hipArray", CONV_MEM, API_RUNTIME)),
("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)),
("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)),
("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)),
(
"cudaMipmappedArray_const_t",
("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME),
),
("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)),
("cudaArrayLayered", ("hipArrayLayered", CONV_MEM, API_RUNTIME)),
(
"cudaArraySurfaceLoadStore",
("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME),
),
("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)),
("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)),
("cudaMemoryAdvise", ("hipMemoryAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemRangeAttribute",
("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)),
("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)),
("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)),
("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)),
("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)),
("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)),
("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceP2PAttr",
("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeMode",
("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncAttributes",
("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)),
("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)),
("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)),
("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)),
("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)),
(
"surfaceReference",
("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)),
("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)),
("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)),
("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)),
(
"cudaSurfaceBoundaryMode",
("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSurfaceFormatMode",
("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)),
("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)),
("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)),
("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)),
(
"cudaTextureType1DLayered",
("hipTextureType1DLayered", CONV_TEX, API_RUNTIME),
),
(
"cudaTextureType2DLayered",
("hipTextureType2DLayered", CONV_TEX, API_RUNTIME),
),
(
"cudaTextureTypeCubemapLayered",
("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME),
),
("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
(
"cudaGraphicsCubeFace",
("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapFlags",
("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsRegisterFlags",
("hipGraphicsRegisterFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceList",
("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaD3D9DeviceList",
("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapFlags",
("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterFlags",
("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10DeviceList",
("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10MapFlags",
("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterFlags",
("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11DeviceList",
("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEglStreamConnection",
("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
("cublasHandle_t", ("rocblas_handle", CONV_TYPE, API_BLAS)),
("cublasOperation_t", ("rocblas_operation", CONV_TYPE, API_BLAS)),
("cublasStatus_t", ("rocblas_status", CONV_TYPE, API_BLAS)),
("cublasFillMode_t", ("rocblas_fill", CONV_TYPE, API_BLAS)),
("cublasDiagType_t", ("rocblas_diagonal", CONV_TYPE, API_BLAS)),
("cublasSideMode_t", ("rocblas_side", CONV_TYPE, API_BLAS)),
("cublasPointerMode_t", ("rocblas_pointer_mode", CONV_TYPE, API_BLAS)),
(
"cublasAtomicsMode_t",
("rocblas_atomics_mode", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDataType_t",
("rocblas_data_type", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED),
),
("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandGenerator_st", ("hiprandGenerator_st", CONV_TYPE, API_RAND)),
("curandGenerator_t", ("hiprandGenerator_t", CONV_TYPE, API_RAND)),
(
"curandDirectionVectorSet",
("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDirectionVectorSet_t",
("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
(
"curandOrdering_t",
("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistribution_st",
("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2V_st",
("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistribution_t",
("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2V_t",
("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionShift_st",
("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionShift_t",
("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionM2Shift_st",
("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionM2Shift_t",
("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2_st",
("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2_t",
("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2K_st",
("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2K_t",
("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDiscreteDistribution_st",
("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND),
),
(
"curandDiscreteDistribution_t",
("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND),
),
("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
(
"curandDirectionVectors32_t",
("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND),
),
(
"curandDirectionVectors64_t",
("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
(
"curandStateScrambledSobol64_t",
("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandStateSobol64_t",
("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandStateScrambledSobol32_t",
("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)),
("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)),
(
"curandStatePhilox4_32_10_t",
("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND),
),
("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)),
("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)),
("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)),
]
)
CUDA_INCLUDE_MAP = collections.OrderedDict(
[
# since pytorch uses "\b{pattern}\b" as the actual re pattern,
# patterns listed here have to begin and end with alnum chars
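# (illustrative: a key such as "cuda_runtime.h" ends up matched roughly as
#  re.search(r"\bcuda_runtime.h\b", line), so a key beginning or ending with
#  a non-word character would never hit the surrounding \b word boundaries)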
(
"include <cuda.h",
("include <hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER),
),
(
'include "cuda.h',
('include "hip/hip_runtime.h', CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER),
),
(
"cuda_runtime.h",
("hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_RUNTIME),
),
("cuda_runtime_api.h", ("hip/hip_runtime_api.h", CONV_INCLUDE, API_RUNTIME)),
(
"channel_descriptor.h",
("hip/channel_descriptor.h", CONV_INCLUDE, API_RUNTIME),
),
("device_functions.h", ("hip/device_functions.h", CONV_INCLUDE, API_RUNTIME)),
("driver_types.h", ("hip/driver_types.h", CONV_INCLUDE, API_RUNTIME)),
("library_types.h", ("hip/library_types.h", CONV_INCLUDE, API_RUNTIME)),
("cuComplex.h", ("hip/hip_complex.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_fp16.h", ("hip/hip_fp16.h", CONV_INCLUDE, API_RUNTIME)),
(
"cuda_texture_types.h",
("hip/hip_texture_types.h", CONV_INCLUDE, API_RUNTIME),
),
("vector_types.h", ("hip/hip_vector_types.h", CONV_INCLUDE, API_RUNTIME)),
("cublas.h", ("rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("cublas_v2.h", ("rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("curand.h", ("hiprand/hiprand.h", CONV_INCLUDE_CUDA_MAIN_H, API_RAND)),
("curand_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete2.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_globals.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_lognormal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mrg32k3a.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_host.h", ("hiprand/hiprand_mtgp32_host.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
(
"curand_mtgp32dc_p_11213.h",
("rocrand/rocrand_mtgp32_11213.h", CONV_INCLUDE, API_RAND),
),
("curand_normal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_normal_static.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_philox4x32_x.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_poisson.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_precalc.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_uniform.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("cusparse.h", ("hipsparse.h", CONV_INCLUDE, API_RAND)),
("cufft.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)),
("cufftXt.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)),
# PyTorch also has a source file named "nccl.h", so the angle brackets "<" ">" are kept here to differentiate it from the system header
("<nccl.h>", ("<rccl.h>", CONV_INCLUDE, API_RUNTIME)),
("nvrtc.h", ("hip/hiprtc.h", CONV_INCLUDE, API_RTC)),
("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)),
("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_run_length_encode.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_radix_sort.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_select.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("nvToolsExt.h", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)),
]
)
CUDA_IDENTIFIER_MAP = collections.OrderedDict(
[
("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)),
(
"CUDA_ERROR_INVALID_CONTEXT",
("hipErrorInvalidContext", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_CONTEXT_ALREADY_CURRENT",
("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_ARRAY_IS_MAPPED",
("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_ALREADY_ACQUIRED",
("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_NOT_MAPPED_AS_ARRAY",
("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_NOT_MAPPED_AS_POINTER",
("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_CONTEXT_ALREADY_IN_USE",
("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING",
(
"hipErrorLaunchIncompatibleTexturing",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE",
("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_CONTEXT_IS_DESTROYED",
("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NOT_PERMITTED",
("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NOT_SUPPORTED",
("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorMissingConfiguration",
("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorPriorLaunchFailure",
("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidDeviceFunction",
("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidConfiguration",
("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidPitchValue",
("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidSymbol",
("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidHostPointer",
("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidDevicePointer",
("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME),
),
(
"cudaErrorInvalidTexture",
("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidTextureBinding",
("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidChannelDescriptor",
(
"hipErrorInvalidChannelDescriptor",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorInvalidMemcpyDirection",
("hipErrorInvalidMemcpyDirection", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorAddressOfConstant",
("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorTextureFetchFailed",
("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorTextureNotBound",
("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSynchronizationError",
("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidFilterSetting",
("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidNormSetting",
("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorMixedDeviceExecution",
("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorNotYetImplemented",
("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorMemoryValueTooLarge",
("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInsufficientDriver",
("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSetOnActiveProcess",
("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidSurface",
("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateVariableName",
("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateTextureName",
("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateSurfaceName",
("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDevicesUnavailable",
("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorIncompatibleDriverContext",
(
"hipErrorIncompatibleDriverContext",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorDeviceAlreadyInUse",
("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchMaxDepthExceeded",
("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFileScopedTex",
("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFileScopedSurf",
("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSyncDepthExceeded",
("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchPendingCountExceeded",
(
"hipErrorLaunchPendingCountExceeded",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorNotPermitted",
("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorNotSupported",
("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorStartupFailure",
("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorApiFailureBase",
("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)),
("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ERROR_OUT_OF_MEMORY",
("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorMemoryAllocation",
("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_NOT_INITIALIZED",
("hipErrorNotInitialized", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInitializationError",
("hipErrorInitializationError", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, API_DRIVER)),
(
"cudaErrorCudartUnloading",
("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_DISABLED",
("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerDisabled",
("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_NOT_INITIALIZED",
("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerNotInitialized",
("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_ALREADY_STARTED",
("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerAlreadyStarted",
("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_ALREADY_STOPPED",
("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerAlreadyStopped",
("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)),
(
"cudaErrorInvalidKernelImage",
("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)),
(
"cudaErrorMapBufferObjectFailed",
("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)),
(
"cudaErrorUnmapBufferObjectFailed",
("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NO_BINARY_FOR_GPU",
("hipErrorNoBinaryForGpu", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorNoKernelImageForDevice",
("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_ECC_UNCORRECTABLE",
("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorECCUncorrectable",
("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_UNSUPPORTED_LIMIT",
("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorUnsupportedLimit",
("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PEER_ACCESS_UNSUPPORTED",
("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessUnsupported",
("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_PTX",
("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidPtx",
("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT",
("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidGraphicsContext",
("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NVLINK_UNCORRECTABLE",
("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorNvlinkUncorrectable",
("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND",
("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorSharedObjectSymbolNotFound",
(
"hipErrorSharedObjectSymbolNotFound",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"CUDA_ERROR_SHARED_OBJECT_INIT_FAILED",
("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorSharedObjectInitFailed",
("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_OPERATING_SYSTEM",
("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorOperatingSystem",
("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_HANDLE",
("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidResourceHandle",
("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)),
("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ERROR_ILLEGAL_ADDRESS",
("hipErrorIllegalAddress", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorIllegalAddress",
("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES",
("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorLaunchOutOfResources",
("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)),
(
"cudaErrorLaunchTimeout",
("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED",
("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessAlreadyEnabled",
("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_PEER_ACCESS_NOT_ENABLED",
("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessNotEnabled",
("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_ASSERT",
("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorAssert",
("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_TOO_MANY_PEERS",
("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorTooManyPeers",
("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED",
("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorHostMemoryAlreadyRegistered",
("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED",
("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorHostMemoryNotRegistered",
("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_HARDWARE_STACK_ERROR",
("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorHardwareStackError",
("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_ILLEGAL_INSTRUCTION",
("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorIllegalInstruction",
("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_MISALIGNED_ADDRESS",
("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorMisalignedAddress",
("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_ADDRESS_SPACE",
("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidAddressSpace",
("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_PC",
("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidPc",
("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_LAUNCH_FAILED",
("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFailure",
("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_UNKNOWN",
("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)),
(
"CU_TR_ADDRESS_MODE_WRAP",
("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_CLAMP",
("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_MIRROR",
("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_BORDER",
("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_X",
("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_X",
("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_Y",
("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_Y",
("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_Z",
("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_Z",
("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_AD_FORMAT_UNSIGNED_INT8",
("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_UNSIGNED_INT16",
("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_UNSIGNED_INT32",
("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT8",
("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT16",
("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT32",
("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER),
),
("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)),
(
"CU_COMPUTEMODE_DEFAULT",
("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_EXCLUSIVE",
("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_PROHIBITED",
("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_EXCLUSIVE_PROCESS",
("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_SET_READ_MOSTLY",
("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_UNSET_READ_MOSTLY",
("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_SET_PREFERRED_LOCATION",
(
"hipMemAdviseSetPreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION",
(
"hipMemAdviseUnsetPreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ADVISE_SET_ACCESSED_BY",
("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_UNSET_ACCESSED_BY",
("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY",
("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION",
(
"hipMemRangeAttributePreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY",
("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION",
(
"hipMemRangeAttributeLastPrefetchLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_CTX_SCHED_AUTO",
("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_SPIN",
("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_YIELD",
("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_BLOCKING_SYNC",
("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_BLOCKING_SYNC",
("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_MASK",
("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_MAP_HOST",
("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_LMEM_RESIZE_TO_MAX",
("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_FLAGS_MASK",
("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LAUNCH_PARAM_BUFFER_POINTER",
("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER),
),
(
"CU_LAUNCH_PARAM_BUFFER_SIZE",
("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER),
),
("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)),
(
"CU_IPC_HANDLE_SIZE",
("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_DEVICEMAP",
("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_PORTABLE",
("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_WRITECOMBINED",
("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_DEVICEMAP",
("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_IOMEMORY",
("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_PORTABLE",
("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PARAM_TR_DEFAULT",
("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_LEGACY",
("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_PER_THREAD",
("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSA_OVERRIDE_FORMAT",
("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSF_NORMALIZED_COORDINATES",
("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSF_READ_AS_INTEGER",
("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUDA_ARRAY3D_2DARRAY",
("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_CUBEMAP",
("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_DEPTH_TEXTURE",
("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_LAYERED",
("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_SURFACE_LDST",
("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_TEXTURE_GATHER",
("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK",
(
"hipDeviceAttributeMaxThreadsPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X",
("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y",
("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z",
("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X",
("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y",
("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z",
("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK",
(
"hipDeviceAttributeMaxSharedMemoryPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK",
(
"hipDeviceAttributeMaxSharedMemoryPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY",
(
"hipDeviceAttributeTotalConstantMemory",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_WARP_SIZE",
("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_PITCH",
("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK",
(
"hipDeviceAttributeMaxRegistersPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK",
(
"hipDeviceAttributeMaxRegistersPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CLOCK_RATE",
("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT",
(
"hipDeviceAttributeTextureAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GPU_OVERLAP",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT",
(
"hipDeviceAttributeMultiprocessorCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT",
(
"hipDeviceAttributeKernelExecTimeout",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_INTEGRATED",
("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY",
(
"hipDeviceAttributeCanMapHostMemory",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_MODE",
("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH",
(
"hipDeviceAttributeMaxTexture1DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH",
(
"hipDeviceAttributeMaxTexture2DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH",
(
"hipDeviceAttributeMaxTexture3DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT",
(
"hipDeviceAttributeMaxTexture3DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH",
(
"hipDeviceAttributeMaxTexture3DDepth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT",
(
"hipDeviceAttributeSurfaceAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS",
("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_ECC_ENABLED",
("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_BUS_ID",
("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID",
("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_TCC_DRIVER",
("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE",
(
"hipDeviceAttributeMemoryClockRate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH",
("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE",
("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR",
("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING",
(
"hipDeviceAttributeUnifiedAddressing",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTexture1DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTexture1DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER",
(
"hipDeviceAttributeCanTex2DGather",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH",
(
"hipDeviceAttributeMaxTexture2DGatherWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DGatherHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DWidthAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DHeightAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DDepthAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID",
("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT",
(
"hipDeviceAttributeTexturePitchAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH",
(
"hipDeviceAttributeMaxTextureCubemapWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTextureCubemapLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTextureCubemapLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH",
(
"hipDeviceAttributeMaxSurface1DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH",
(
"hipDeviceAttributeMaxSurface2DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT",
(
"hipDeviceAttributeMaxSurface2DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH",
(
"hipDeviceAttributeMaxSurface3DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT",
(
"hipDeviceAttributeMaxSurface3DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH",
(
"hipDeviceAttributeMaxSurface3DDepth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurface1DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurface1DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurface2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT",
(
"hipDeviceAttributeMaxSurface2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurface2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH",
(
"hipDeviceAttributeMaxSurfaceCubemapWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH",
(
"hipDeviceAttributeMaxTexture1DLinearWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLinearWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLinearHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH",
(
"hipDeviceAttributeMaxTexture2DLinearPitch",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH",
(
"hipDeviceAttributeMaxTexture2DMipmappedWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DMipmappedHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR",
("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR",
("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH",
(
"hipDeviceAttributeMaxTexture1DMipmappedWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED",
(
"hipDeviceAttributeStreamPrioritiesSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED",
(
"hipDeviceAttributeGlobalL1CacheSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED",
(
"hipDeviceAttributeLocalL1CacheSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR",
(
"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor",
CONV_TYPE,
API_DRIVER,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR",
(
"hipDeviceAttributeMaxRegistersPerMultiprocessor",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY",
("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD",
("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID",
(
"hipDeviceAttributeMultiGpuBoardGroupId",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED",
(
"hipDeviceAttributeHostNativeAtomicSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO",
(
"hipDeviceAttributeSingleToDoublePrecisionPerfRatio",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS",
(
"hipDeviceAttributePageableMemoryAccess",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS",
(
"hipDeviceAttributeConcurrentManagedAccess",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED",
(
"hipDeviceAttributeComputePreemptionSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM",
(
"hipDeviceAttributeCanUseHostPointerForRegisteredMem",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX",
("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_CONTEXT",
("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_MEMORY_TYPE",
("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_DEVICE_POINTER",
(
"hipPointerAttributeDevicePointer",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_POINTER_ATTRIBUTE_HOST_POINTER",
("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_P2P_TOKENS",
("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_SYNC_MEMOPS",
("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_BUFFER_ID",
("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_IS_MANAGED",
("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK",
(
"hipFuncAttributeMaxThreadsPerBlocks",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES",
("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES",
("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES",
("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_NUM_REGS",
("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_PTX_VERSION",
("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_BINARY_VERSION",
("hipFuncAttributeBinaryVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_CACHE_MODE_CA",
("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_MAX",
("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE",
("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY",
("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_NONE",
("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY",
(
"hipGraphicsRegisterFlagsReadOnly",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD",
(
"hipGraphicsRegisterFlagsWriteDiscard",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST",
(
"hipGraphicsRegisterFlagsSurfaceLoadStore",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER",
(
"hipGraphicsRegisterFlagsTextureGather",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_OCCUPANCY_DEFAULT",
("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE",
(
"hipOccupancyDisableCachingOverride",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_FUNC_CACHE_PREFER_NONE",
("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER),
),
(
"CU_FUNC_CACHE_PREFER_SHARED",
("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER),
),
("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)),
(
"CU_FUNC_CACHE_PREFER_EQUAL",
("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER),
),
(
"CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS",
("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)),
(
"CU_JIT_CACHE_OPTION_NONE",
("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_CACHE_OPTION_CG",
("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_CACHE_OPTION_CA",
("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PREFER_PTX",
("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PREFER_BINARY",
("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)),
(
"CU_JIT_THREADS_PER_BLOCK",
("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER),
),
("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)),
("CU_JIT_INFO_LOG_BUFFER", ("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)),
(
"CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES",
("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_ERROR_LOG_BUFFER",
("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES",
("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_OPTIMIZATION_LEVEL",
("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_TARGET_FROM_CUCONTEXT",
("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER),
),
("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)),
(
"CU_JIT_FALLBACK_STRATEGY",
("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_GENERATE_DEBUG_INFO",
("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER),
),
("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)),
(
"CU_JIT_GENERATE_LINE_INFO",
("hipJitOptionGenerateLineInfo", CONV_JIT, API_DRIVER),
),
("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)),
("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)),
("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)),
("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)),
(
"CU_TARGET_COMPUTE_10",
("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_11",
("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_12",
("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_13",
("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_20",
("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_21",
("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_30",
("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_32",
("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_35",
("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_37",
("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_50",
("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_52",
("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_53",
("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_60",
("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_61",
("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_62",
("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_CUBIN",
("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_PTX",
("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_FATBINARY",
("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_OBJECT",
("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_LIBRARY",
("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_NUM_INPUT_TYPES",
("hipJitInputTypeNumInputTypes", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_STACK_SIZE",
("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_PRINTF_FIFO_SIZE",
("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_MALLOC_HEAP_SIZE",
("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER),
),
(
"CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH",
("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT",
(
"hipLimitDevRuntimePendingLaunchCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ATTACH_GLOBAL",
("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ATTACH_HOST",
("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ATTACH_SINGLE",
("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_HOST",
("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_DEVICE",
("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_ARRAY",
("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_UNIFIED",
("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_ARRAY",
("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_MIPMAPPED_ARRAY",
("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_LINEAR",
("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_PITCH2D",
("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)),
(
"CU_RES_VIEW_FORMAT_UINT_1X8",
("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X8",
("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X8",
("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X8",
("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X8",
("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X8",
("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_1X16",
("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X16",
("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X16",
("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X16",
("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X16",
("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X16",
("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_1X32",
("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X32",
("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X32",
("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X32",
("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X32",
("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X32",
("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_1X16",
("hipResViewFormatHalf1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_2X16",
("hipResViewFormatHalf2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_4X16",
("hipResViewFormatHalf4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_1X32",
("hipResViewFormatFloat1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_2X32",
("hipResViewFormatFloat2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_4X32",
("hipResViewFormatFloat4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC1",
("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC2",
("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC3",
("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC4",
("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC4",
("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC5",
("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC5",
("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC6H",
("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC6H",
("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC7",
("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE",
("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE",
("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE",
("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER),
),
("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)),
("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)),
(
"CU_STREAM_WAIT_VALUE_GEQ",
("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_EQ",
("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_AND",
("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_FLUSH",
("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WRITE_VALUE_DEFAULT",
("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER",
(
"hipStreamWriteValueNoMemoryBarrier",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_STREAM_MEM_OP_WAIT_VALUE_32",
("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_MEM_OP_WRITE_VALUE_32",
("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES",
(
"hipStreamBatchMemOpFlushRemoteWrites",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
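# Driver API function mappings (cu* -> hip*): error handling, init, version, and context management.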
(
"cuGetErrorName",
("hipGetErrorName___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGetErrorString",
("hipGetErrorString___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED),
),
("cuInit", ("hipInit", CONV_INIT, API_DRIVER)),
("cuDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_DRIVER)),
("cuCtxCreate", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)),
("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)),
("cuCtxDestroy", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)),
("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)),
(
"cuCtxGetLimit",
("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuCtxGetSharedMemConfig",
("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER),
),
(
"cuCtxGetStreamPriorityRange",
("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)),
(
"cuCtxSetLimit",
("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuCtxSetSharedMemConfig",
("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER),
),
("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)),
("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)),
("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)),
("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)),
(
"cuDeviceGetP2PAttribute",
("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuDevicePrimaryCtxGetState",
("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxRelease",
("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxReset",
("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxRetain",
("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxSetFlags",
("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER),
),
("cuDeviceGet", ("hipGetDevice", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)),
(
"cuDeviceComputeCapability",
("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER),
),
("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)),
("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuLinkComplete",
("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)),
("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)),
(
"cuModuleGetSurfRef",
("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)),
("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)),
("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)),
("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)),
(
"cuModuleLoadFatBinary",
("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)),
(
"CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK",
(
"hipDeviceP2PAttributePerformanceRank",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED",
(
"hipDeviceP2PAttributeAccessSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED",
(
"hipDeviceP2PAttributeNativeAtomicSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)),
("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)),
("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)),
("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)),
("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)),
("cuEventDestroy", ("hipEventDestroy", CONV_EVENT, API_DRIVER)),
("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)),
("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)),
("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)),
("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)),
("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)),
(
"cuFuncGetAttribute",
("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)),
(
"cuFuncSetSharedMemConfig",
("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)),
(
"cuFuncSetBlockShape",
("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuFuncSetSharedSize",
("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuLaunchGridAsync",
("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuParamSetSize",
("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuOccupancyMaxActiveBlocksPerMultiprocessor",
(
"hipModuleOccupancyMaxActiveBlocksPerMultiprocessor",
CONV_OCCUPANCY,
API_DRIVER,
),
),
(
"cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
(
"hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
CONV_OCCUPANCY,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuOccupancyMaxPotentialBlockSize",
("hipModuleOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER),
),
(
"cuOccupancyMaxPotentialBlockSizeWithFlags",
(
"hipModuleOccupancyMaxPotentialBlockSizeWithFlags",
CONV_OCCUPANCY,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)),
(
"cuStreamAttachMemAsync",
("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamCreate",
("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamCreateWithPriority",
("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)),
("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)),
("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)),
(
"cuStreamGetPriority",
("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)),
("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)),
("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)),
(
"cuStreamWaitValue32",
("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamWriteValue32",
("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamBatchMemOp",
("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
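# Driver API memory management: arrays, IPC handles, allocation, memcpy/memset, and pointer queries.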
("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)),
(
"cuArray3DGetDescriptor",
("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuArrayGetDescriptor",
("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcCloseMemHandle",
("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcGetEventHandle",
("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcGetMemHandle",
("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcOpenEventHandle",
("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcOpenMemHandle",
("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)),
("cuMemAllocHost", ("hipMemAllocHost", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemAllocManaged",
("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemAllocPitch",
("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpy2DAsync",
("hipMemcpy2DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy2DUnaligned",
("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpy3DAsync",
("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy3DPeer",
("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy3DPeerAsync",
("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpyAtoHAsync",
("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpyHtoAAsync",
("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", CONV_MEM, API_DRIVER)),
(
"cuMemcpyPeerAsync",
("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemFree", ("hipFree", CONV_MEM, API_DRIVER)),
("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)),
("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)),
(
"cuMemGetAddressRange",
("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)),
("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)),
(
"cuMemHostGetDevicePointer",
("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemHostGetFlags",
("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)),
("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)),
("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD16Async",
("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D16Async",
("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D32_v2", ("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D32Async",
("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D8Async",
("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)),
("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)),
("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD8Async",
("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayCreate",
("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayDestroy",
("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayGetLevel",
("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemPrefetchAsync",
("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemRangeGetAttribute",
("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemRangeGetAttributes",
("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuPointerGetAttribute",
("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuPointerGetAttributes",
("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuPointerSetAttribute",
("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
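# Texture/surface reference and object APIs (CU_TR_*, cuTexRef*, cuTexObject*, cuSurf*).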
("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)),
(
"CU_TR_FILTER_MODE_LINEAR",
("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetAddress",
("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetAddressMode",
("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetArray",
("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetBorderColor",
("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFilterMode",
("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFlags",
("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFormat",
("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMaxAnisotropy",
("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapFilterMode",
("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapLevelBias",
("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapLevelClamp",
("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmappedArray",
("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetAddress",
("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetAddress2D",
("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)),
(
"cuTexRefSetBorderColor",
("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)),
("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, API_DRIVER)),
(
"cuTexRefSetMaxAnisotropy",
("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapFilterMode",
("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapLevelBias",
("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapLevelClamp",
("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmappedArray",
("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuTexRefDestroy",
("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfRefGetArray",
("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfRefSetArray",
("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectCreate",
("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectDestroy",
("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetResourceDesc",
("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetResourceViewDesc",
("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetTextureDesc",
("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectCreate",
("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectDestroy",
("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectGetResourceDesc",
("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsMapResources",
("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsResourceGetMappedMipmappedArray",
(
"hipGraphicsResourceGetMappedMipmappedArray",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsResourceGetMappedPointer",
(
"hipGraphicsResourceGetMappedPointer",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsResourceSetMapFlags",
(
"hipGraphicsResourceSetMapFlags",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsSubResourceGetMappedArray",
(
"hipGraphicsSubResourceGetMappedArray",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsUnmapResources",
("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsUnregisterResource",
(
"hipGraphicsUnregisterResource",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuProfilerInitialize",
("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED),
),
("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)),
("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)),
(
"CU_GL_DEVICE_LIST_ALL",
("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_DEVICE_LIST_CURRENT_FRAME",
("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_DEVICE_LIST_NEXT_FRAME",
("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuGraphicsGLRegisterBuffer",
("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsGLRegisterImage",
("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"CU_GL_MAP_RESOURCE_FLAGS_NONE",
("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
(
"HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
CONV_GL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
(
"HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
CONV_GL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuGLMapBufferObject",
("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLMapBufferObjectAsync",
("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLRegisterBufferObject",
("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLSetBufferObjectMapFlags",
("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnmapBufferObject",
("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnmapBufferObjectAsync",
("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnregisterBufferObject",
("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
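# Direct3D 9 interop; every entry is flagged HIP_UNSUPPORTED.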
(
"CU_D3D9_DEVICE_LIST_ALL",
("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_DEVICE_LIST_NEXT_FRAME",
("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9CtxCreate",
("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9CtxCreateOnDevice",
("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDevice",
("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDevices",
("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDirect3DDevice",
("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D9RegisterResource",
("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_NONE",
("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_READONLY",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_REGISTER_FLAGS_NONE",
("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_REGISTER_FLAGS_ARRAY",
("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9MapResources",
("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9RegisterResource",
("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedArray",
("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedPitch",
("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedPointer",
("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedSize",
("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetSurfaceDimensions",
(
"hipD3D9ResourceGetSurfaceDimensions",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D9ResourceSetMapFlags",
("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9UnmapResources",
("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9UnregisterResource",
("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
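# Direct3D 10 interop; every entry is flagged HIP_UNSUPPORTED.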
(
"CU_D3D10_DEVICE_LIST_ALL",
("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D10_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_DEVICE_LIST_NEXT_FRAME",
(
"HIP_D3D10_DEVICE_LIST_NEXT_FRAME",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D10GetDevice",
("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10GetDevices",
("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D10RegisterResource",
(
"hipGraphicsD3D10RegisterResource",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_NONE",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_NONE",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_READONLY",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_REGISTER_FLAGS_NONE",
("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D10_REGISTER_FLAGS_ARRAY",
("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10CtxCreate",
("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10CtxCreateOnDevice",
("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10GetDirect3DDevice",
("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10MapResources",
("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10RegisterResource",
("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedArray",
("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedPitch",
("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedPointer",
(
"hipD3D10ResourceGetMappedPointer",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D10ResourceGetMappedSize",
("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetSurfaceDimensions",
(
"hipD3D10ResourceGetSurfaceDimensions",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD310ResourceSetMapFlags",
("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10UnmapResources",
("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10UnregisterResource",
("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
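# Direct3D 11 interop; every entry is flagged HIP_UNSUPPORTED.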
(
"CU_D3D11_DEVICE_LIST_ALL",
("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D11_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D11_DEVICE_LIST_NEXT_FRAME",
(
"HIP_D3D11_DEVICE_LIST_NEXT_FRAME",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D11GetDevice",
("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11GetDevices",
("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D11RegisterResource",
(
"hipGraphicsD3D11RegisterResource",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D11CtxCreate",
("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11CtxCreateOnDevice",
("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11GetDirect3DDevice",
("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
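# VDPAU interop; every entry is flagged HIP_UNSUPPORTED.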
(
"cuGraphicsVDPAURegisterOutputSurface",
(
"hipGraphicsVDPAURegisterOutputSurface",
CONV_VDPAU,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsVDPAURegisterVideoSurface",
(
"hipGraphicsVDPAURegisterVideoSurface",
CONV_VDPAU,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuVDPAUGetDevice",
("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuVDPAUCtxCreate",
("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED),
),
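# EGL stream interop; every entry is flagged HIP_UNSUPPORTED.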
(
"cuEGLStreamConsumerAcquireFrame",
("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerConnect",
("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerConnectWithFlags",
(
"hipEGLStreamConsumerConnectWithFlags",
CONV_EGL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuEGLStreamConsumerDisconnect",
("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerReleaseFrame",
("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerConnect",
("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerDisconnect",
("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerPresentFrame",
("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerReturnFrame",
("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsEGLRegisterImage",
("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsResourceGetMappedEglFrame",
(
"hipGraphicsResourceGetMappedEglFrame",
CONV_EGL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
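# Runtime API mappings (cuda* / CUDA_* -> hip* / HIP_*).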
("cudaDataType_t", ("hipDataType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaDataType", ("hipDataType", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_16F", ("HIP_R_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_16F", ("HIP_C_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32F", ("HIP_R_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32F", ("HIP_C_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_64F", ("HIP_R_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_64F", ("HIP_C_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_8I", ("HIP_R_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_8I", ("HIP_C_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_8U", ("HIP_R_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_8U", ("HIP_C_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32I", ("HIP_R_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32I", ("HIP_C_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_R_32U", ("HIP_R_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
("CUDA_C_32U", ("HIP_C_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
(
"MAJOR_VERSION",
("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"MINOR_VERSION",
("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"PATCH_LEVEL",
("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachGlobal",
("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachHost",
("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachSingle",
("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaOccupancyDefault",
("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaOccupancyDisableCachingOverride",
(
"hipOccupancyDisableCachingOverride",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)),
("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)),
("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy3DPeerParms",
("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy2DArrayToArray",
("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DFromArray",
("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DFromArrayAsync",
("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DToArrayAsync",
("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy3DAsync",
("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy3DPeer",
("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy3DPeerAsync",
("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpyArrayToArray",
("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpyFromArrayAsync",
("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpyFromSymbolAsync",
("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME),
),
("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemRangeGetAttribute",
("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeGetAttributes",
("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseSetReadMostly",
("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseUnsetReadMostly",
("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseSetPreferredLocation",
(
"hipMemAdviseSetPreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemAdviseUnsetPreferredLocation",
(
"hipMemAdviseUnsetPreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemAdviseSetAccessedBy",
("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseUnsetAccessedBy",
("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributeReadMostly",
("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributePreferredLocation",
(
"hipMemRangeAttributePreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemRangeAttributeAccessedBy",
("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributeLastPrefetchLocation",
(
"hipMemRangeAttributeLastPrefetchLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)),
("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)),
("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpyDeviceToDevice",
("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME),
),
("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)),
("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)),
("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)),
("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)),
(
"cudaMemset2DAsync",
("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemset3DAsync",
("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemGetInfo", ("hipMemGetInfo", CONV_MEM, API_RUNTIME)),
(
"cudaArrayGetInfo",
("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFreeMipmappedArray",
("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetMipmappedArrayLevel",
("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSymbolAddress",
("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSymbolSize",
("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemPrefetchAsync",
("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)),
("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)),
("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)),
("cudaMalloc3DArray", ("hipMalloc3DArray", CONV_MEM, API_RUNTIME)),
(
"cudaMallocManaged",
("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMallocMipmappedArray",
("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)),
("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)),
("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)),
("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)),
("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)),
("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)),
("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)),
("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)),
("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)),
("cudaHostAllocDefault", ("hipHostMallocDefault", CONV_MEM, API_RUNTIME)),
("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)),
("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)),
(
"cudaHostAllocWriteCombined",
("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME),
),
("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)),
(
"cudaHostRegisterPortable",
("hipHostRegisterPortable", CONV_MEM, API_RUNTIME),
),
("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)),
(
"cudaHostRegisterIoMemory",
("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME),
),
# ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...)),
("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)),
(
"cudaEventCreateWithFlags",
("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME),
),
("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)),
("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)),
("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)),
("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)),
("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)),
("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)),
("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)),
("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)),
("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)),
("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)),
(
"cudaStreamCreateWithFlags",
("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME),
),
(
"cudaStreamCreateWithPriority",
("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_RUNTIME)),
("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)),
("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)),
("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)),
("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)),
("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)),
(
"cudaStreamAttachMemAsync",
("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaStreamGetPriority",
("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)),
("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)),
("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)),
("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)),
("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)),
("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)),
(
"cudaThreadGetCacheConfig",
("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME),
),
(
"cudaThreadGetLimit",
("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaThreadSetCacheConfig",
("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME),
),
(
"cudaThreadSetLimit",
("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)),
("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)),
(
"cudaDevAttrMaxThreadsPerBlock",
("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimX",
("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimY",
("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimZ",
("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimX",
("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimY",
("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimZ",
("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxSharedMemoryPerBlock",
("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTotalConstantMemory",
("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME),
),
("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)),
(
"cudaDevAttrMaxPitch",
("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMaxRegistersPerBlock",
("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrClockRate",
("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTextureAlignment",
(
"hipDeviceAttributeTextureAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrGpuOverlap",
("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMultiProcessorCount",
("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrKernelExecTimeout",
(
"hipDeviceAttributeKernelExecTimeout",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrIntegrated",
("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrCanMapHostMemory",
(
"hipDeviceAttributeCanMapHostMemory",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputeMode",
("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxTexture1DWidth",
(
"hipDeviceAttributeMaxTexture1DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DWidth",
(
"hipDeviceAttributeMaxTexture2DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DHeight",
(
"hipDeviceAttributeMaxTexture2DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DWidth",
(
"hipDeviceAttributeMaxTexture3DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DHeight",
(
"hipDeviceAttributeMaxTexture3DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DDepth",
(
"hipDeviceAttributeMaxTexture3DDepth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredWidth",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredHeight",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredLayers",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrSurfaceAlignment",
(
"hipDeviceAttributeSurfaceAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrConcurrentKernels",
("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrEccEnabled",
("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)),
(
"cudaDevAttrPciDeviceId",
("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTccDriver",
("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMemoryClockRate",
("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrGlobalMemoryBusWidth",
("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrL2CacheSize",
("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxThreadsPerMultiProcessor",
("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrAsyncEngineCount",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrUnifiedAddressing",
(
"hipDeviceAttributeUnifiedAddressing",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLayeredWidth",
(
"hipDeviceAttributeMaxTexture1DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLayeredLayers",
(
"hipDeviceAttributeMaxTexture1DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DGatherWidth",
(
"hipDeviceAttributeMaxTexture2DGatherWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DGatherHeight",
(
"hipDeviceAttributeMaxTexture2DGatherHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DWidthAlt",
(
"hipDeviceAttributeMaxTexture3DWidthAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DHeightAlt",
(
"hipDeviceAttributeMaxTexture3DHeightAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DDepthAlt",
(
"hipDeviceAttributeMaxTexture3DDepthAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrPciDomainId",
("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrTexturePitchAlignment",
(
"hipDeviceAttributeTexturePitchAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapWidth",
(
"hipDeviceAttributeMaxTextureCubemapWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapLayeredWidth",
(
"hipDeviceAttributeMaxTextureCubemapLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapLayeredLayers",
(
"hipDeviceAttributeMaxTextureCubemapLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DWidth",
(
"hipDeviceAttributeMaxSurface1DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DWidth",
(
"hipDeviceAttributeMaxSurface2DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DHeight",
(
"hipDeviceAttributeMaxSurface2DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DWidth",
(
"hipDeviceAttributeMaxSurface3DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DHeight",
(
"hipDeviceAttributeMaxSurface3DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DDepth",
(
"hipDeviceAttributeMaxSurface3DDepth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DLayeredWidth",
(
"hipDeviceAttributeMaxSurface1DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DLayeredLayers",
(
"hipDeviceAttributeMaxSurface1DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredWidth",
(
"hipDeviceAttributeMaxSurface2DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredHeight",
(
"hipDeviceAttributeMaxSurface2DLayeredHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredLayers",
(
"hipDeviceAttributeMaxSurface2DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapWidth",
(
"hipDeviceAttributeMaxSurfaceCubemapWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapLayeredWidth",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapLayeredLayers",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLinearWidth",
(
"hipDeviceAttributeMaxTexture1DLinearWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearWidth",
(
"hipDeviceAttributeMaxTexture2DLinearWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearHeight",
(
"hipDeviceAttributeMaxTexture2DLinearHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearPitch",
(
"hipDeviceAttributeMaxTexture2DLinearPitch",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DMipmappedWidth",
(
"hipDeviceAttributeMaxTexture2DMipmappedWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DMipmappedHeight",
(
"hipDeviceAttributeMaxTexture2DMipmappedHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputeCapabilityMajor",
("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrComputeCapabilityMinor",
("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxTexture1DMipmappedWidth",
(
"hipDeviceAttributeMaxTexture1DMipmappedWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrStreamPrioritiesSupported",
(
"hipDeviceAttributeStreamPrioritiesSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrGlobalL1CacheSupported",
(
"hipDeviceAttributeGlobalL1CacheSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrLocalL1CacheSupported",
(
"hipDeviceAttributeLocalL1CacheSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSharedMemoryPerMultiprocessor",
(
"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor",
CONV_TYPE,
API_RUNTIME,
),
),
(
"cudaDevAttrMaxRegistersPerMultiprocessor",
(
"hipDeviceAttributeMaxRegistersPerMultiprocessor",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrManagedMemory",
(
"hipDeviceAttributeManagedMemory",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrIsMultiGpuBoard",
("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMultiGpuBoardGroupID",
(
"hipDeviceAttributeMultiGpuBoardGroupID",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrHostNativeAtomicSupported",
(
"hipDeviceAttributeHostNativeAtomicSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrSingleToDoublePrecisionPerfRatio",
(
"hipDeviceAttributeSingleToDoublePrecisionPerfRatio",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrPageableMemoryAccess",
(
"hipDeviceAttributePageableMemoryAccess",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrConcurrentManagedAccess",
(
"hipDeviceAttributeConcurrentManagedAccess",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputePreemptionSupported",
(
"hipDeviceAttributeComputePreemptionSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrCanUseHostPointerForRegisteredMem",
(
"hipDeviceAttributeCanUseHostPointerForRegisteredMem",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaPointerGetAttributes",
("hipPointerGetAttributes", CONV_MEM, API_RUNTIME),
),
(
"cudaHostGetDevicePointer",
("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME),
),
(
"cudaGetDeviceProperties",
("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME),
),
("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)),
(
"cudaDeviceGetByPCIBusId",
("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME),
),
(
"cudaDeviceGetStreamPriorityRange",
(
"hipDeviceGetStreamPriorityRange",
CONV_DEVICE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaSetValidDevices",
("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevP2PAttrPerformanceRank",
(
"hipDeviceP2PAttributePerformanceRank",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevP2PAttrAccessSupported",
(
"hipDeviceP2PAttributeAccessSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevP2PAttrNativeAtomicSupported",
(
"hipDeviceP2PAttributeNativeAtomicSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDeviceGetP2PAttribute",
("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeDefault",
("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeExclusive",
("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeProhibited",
("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeExclusiveProcess",
("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetDeviceFlags",
("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceBlockingSync",
("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceScheduleBlockingSync",
("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceScheduleMask",
("hipDeviceScheduleMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceLmemResizeToMax",
("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaDeviceSetCacheConfig",
("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME),
),
(
"cudaDeviceGetCacheConfig",
("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME),
),
("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncCachePreferNone",
("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME),
),
(
"cudaFuncCachePreferShared",
("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME),
),
("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncCachePreferEqual",
("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME),
),
(
"cudaFuncGetAttributes",
("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFuncSetSharedMemConfig",
("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetParameterBuffer",
("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSetDoubleForDevice",
("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSetDoubleForHost",
("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaConfigureCall",
("hipConfigureCall", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaSetupArgument",
("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)),
(
"cudaRuntimeGetVersion",
("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaOccupancyMaxPotentialBlockSize",
("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME),
),
(
"cudaOccupancyMaxPotentialBlockSizeWithFlags",
(
"hipOccupancyMaxPotentialBlockSizeWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxActiveBlocksPerMultiprocessor",
(
"hipOccupancyMaxActiveBlocksPerMultiprocessor",
CONV_OCCUPANCY,
API_RUNTIME,
),
),
(
"cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
(
"hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxPotentialBlockSizeVariableSMem",
(
"hipOccupancyMaxPotentialBlockSizeVariableSMem",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags",
(
"hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)),
(
"cudaDeviceDisablePeerAccess",
("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME),
),
(
"cudaDeviceEnablePeerAccess",
("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME),
),
("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)),
(
"cudaIpcMemLazyEnablePeerAccess",
("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceSetSharedMemConfig",
("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME),
),
(
"cudaDeviceGetSharedMemConfig",
("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeDefault",
("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeFourByte",
("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeEightByte",
("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME),
),
(
"cudaLimitStackSize",
("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaLimitPrintfFifoSize",
("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)),
(
"cudaLimitDevRuntimeSyncDepth",
("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaLimitDevRuntimePendingLaunchCount",
(
"hipLimitDevRuntimePendingLaunchCount",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)),
(
"cudaProfilerInitialize",
("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)),
("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)),
(
"cudaKeyValuePair",
("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)),
(
"cudaReadModeNormalizedFloat",
("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME),
),
("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)),
("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)),
("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)),
("cudaUnbindTexture", ("hipUnbindTexture", CONV_TEX, API_RUNTIME)),
("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)),
("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)),
(
"cudaBindTextureToMipmappedArray",
("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureAlignmentOffset",
("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME),
),
("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)),
(
"cudaChannelFormatKindSigned",
("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindUnsigned",
("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindFloat",
("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindNone",
("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME),
),
("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)),
(
"cudaResourceTypeMipmappedArray",
("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME),
),
("cudaResourceTypeLinear", ("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)),
("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)),
(
"cudaResViewFormatUnsignedChar1",
("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedChar2",
("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedChar4",
("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar1",
("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar2",
("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar4",
("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort1",
("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort2",
("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort4",
("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort1",
("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort2",
("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort4",
("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt1",
("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt2",
("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt4",
("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt1",
("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt2",
("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt4",
("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME),
),
("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)),
(
"cudaResViewFormatUnsignedBlockCompressed1",
("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed2",
("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed3",
("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed4",
("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed4",
("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed5",
("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed5",
("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed6H",
("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed6H",
("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed7",
("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME),
),
("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)),
("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)),
("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)),
("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)),
("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)),
(
"cudaDestroyTextureObject",
("hipDestroyTextureObject", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectResourceDesc",
("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectResourceViewDesc",
("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectTextureDesc",
("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME),
),
(
"cudaBindSurfaceToArray",
("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSurfaceReference",
("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeZero",
("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeClamp",
("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeTrap",
("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFormatModeForced",
("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFormatModeAuto",
("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaCreateSurfaceObject",
("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDestroySurfaceObject",
("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSurfaceObjectResourceDesc",
(
"hipGetSurfaceObjectResourceDesc",
CONV_SURFACE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)),
(
"cudaGLGetDevices",
("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterBuffer",
("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterImage",
("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaWGLGetDevice",
("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapResources",
("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsResourceGetMappedMipmappedArray",
(
"hipGraphicsResourceGetMappedMipmappedArray",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsResourceGetMappedPointer",
(
"hipGraphicsResourceGetMappedPointer",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsResourceSetMapFlags",
(
"hipGraphicsResourceSetMapFlags",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsSubResourceGetMappedArray",
(
"hipGraphicsSubResourceGetMappedArray",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsUnmapResources",
("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsUnregisterResource",
(
"hipGraphicsUnregisterResource",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveX",
(
"hipGraphicsCubeFacePositiveX",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeX",
(
"hipGraphicsCubeFaceNegativeX",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveY",
(
"hipGraphicsCubeFacePositiveY",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeY",
(
"hipGraphicsCubeFaceNegativeY",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveZ",
(
"hipGraphicsCubeFacePositiveZ",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeZ",
(
"hipGraphicsCubeFaceNegativeZ",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsMapFlagsNone",
("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapFlagsReadOnly",
(
"hipGraphicsMapFlagsReadOnly",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsMapFlagsWriteDiscard",
(
"hipGraphicsMapFlagsWriteDiscard",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsNone",
(
"hipGraphicsRegisterFlagsNone",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsReadOnly",
(
"hipGraphicsRegisterFlagsReadOnly",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsWriteDiscard",
(
"hipGraphicsRegisterFlagsWriteDiscard",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsSurfaceLoadStore",
(
"hipGraphicsRegisterFlagsSurfaceLoadStore",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsTextureGather",
(
"hipGraphicsRegisterFlagsTextureGather",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLDeviceListAll",
("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceListCurrentFrame",
("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceListNextFrame",
("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapFlagsNone",
("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapFlagsReadOnly",
(
"HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
CONV_GL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLMapFlagsWriteDiscard",
(
"HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
CONV_GL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLMapBufferObject",
("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapBufferObjectAsync",
("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLRegisterBufferObject",
("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLSetBufferObjectMapFlags",
("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLSetGLDevice",
("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnmapBufferObject",
("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnmapBufferObjectAsync",
("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnregisterBufferObject",
("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9DeviceListAll",
("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9DeviceListCurrentFrame",
(
"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9DeviceListNextFrame",
(
"HIP_D3D9_DEVICE_LIST_NEXT_FRAME",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9GetDevice",
("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9GetDevices",
("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9GetDirect3DDevice",
("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9SetDirect3DDevice",
("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D9RegisterResource",
(
"hipGraphicsD3D9RegisterResource",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlags",
("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapFlagsNone",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_NONE",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlagsReadOnly",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlagsWriteDiscard",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9RegisterFlagsNone",
("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterFlagsArray",
("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapResources",
("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterResource",
("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedArray",
("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedPitch",
("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedPointer",
(
"hipD3D9ResourceGetMappedPointer",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9ResourceGetMappedSize",
("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetSurfaceDimensions",
(
"hipD3D9ResourceGetSurfaceDimensions",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9ResourceSetMapFlags",
("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9UnmapResources",
("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9UnregisterResource",
("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10DeviceListAll",
("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10DeviceListCurrentFrame",
(
"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10DeviceListNextFrame",
(
"HIP_D3D10_DEVICE_LIST_NEXT_FRAME",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10GetDevice",
("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10GetDevices",
("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D10RegisterResource",
(
"hipGraphicsD3D10RegisterResource",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsNone",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_NONE",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsReadOnly",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsWriteDiscard",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10RegisterFlagsNone",
("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterFlagsArray",
(
"HIP_D3D10_REGISTER_FLAGS_ARRAY",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10GetDirect3DDevice",
("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10MapResources",
("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterResource",
("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10ResourceGetMappedArray",
(
"hipD3D10ResourceGetMappedArray",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedPitch",
(
"hipD3D10ResourceGetMappedPitch",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedPointer",
(
"hipD3D10ResourceGetMappedPointer",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedSize",
("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10ResourceGetSurfaceDimensions",
(
"hipD3D10ResourceGetSurfaceDimensions",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceSetMapFlags",
("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10SetDirect3DDevice",
("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10UnmapResources",
("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10UnregisterResource",
("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11DeviceListAll",
("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11DeviceListCurrentFrame",
(
"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D11DeviceListNextFrame",
(
"HIP_D3D11_DEVICE_LIST_NEXT_FRAME",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D11GetDevice",
("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11GetDevices",
("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D11RegisterResource",
(
"hipGraphicsD3D11RegisterResource",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsVDPAURegisterOutputSurface",
(
"hipGraphicsVDPAURegisterOutputSurface",
CONV_VDPAU,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsVDPAURegisterVideoSurface",
(
"hipGraphicsVDPAURegisterVideoSurface",
CONV_VDPAU,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaVDPAUGetDevice",
("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaVDPAUSetVDPAUDevice",
("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamConsumerAcquireFrame",
(
"hipEGLStreamConsumerAcquireFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamConsumerConnect",
("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamConsumerConnectWithFlags",
(
"hipEGLStreamConsumerConnectWithFlags",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamConsumerReleaseFrame",
(
"hipEGLStreamConsumerReleaseFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamProducerConnect",
("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamProducerDisconnect",
("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamProducerPresentFrame",
(
"hipEGLStreamProducerPresentFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamProducerReturnFrame",
("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsEGLRegisterImage",
("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsResourceGetMappedEglFrame",
(
"hipGraphicsResourceGetMappedEglFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cublasInit", ("rocblas_init", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasShutdown",
("rocblas_shutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetVersion",
("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetError",
("rocblas_get_error", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasAlloc", ("rocblas_alloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasFree", ("rocblas_free", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSetKernelStream",
("rocblas_set_kernel_stream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetAtomicsMode",
("rocblas_get_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetAtomicsMode",
("rocblas_set_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetMathMode",
("rocblas_get_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetMathMode",
("rocblas_set_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("CUBLAS_OP_N", ("rocblas_operation_none", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUBLAS_OP_T",
("rocblas_operation_transpose", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_OP_C",
("rocblas_operation_conjugate_transpose", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_SUCCESS",
("rocblas_status_success", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_NOT_INITIALIZED",
("rocblas_status_invalid_handle", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_ALLOC_FAILED",
("rocblas_status_memory_error", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_INVALID_VALUE",
("rocblas_status_invalid_pointer", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_MAPPING_ERROR",
("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_EXECUTION_FAILED",
("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_INTERNAL_ERROR",
("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_NOT_SUPPORTED",
("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_ARCH_MISMATCH",
("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_FILL_MODE_LOWER",
("rocblas_fill_lower", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_FILL_MODE_UPPER",
("rocblas_fill_upper", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_DIAG_NON_UNIT",
("rocblas_diagonal_non_unit", CONV_NUMERIC_LITERAL, API_BLAS),
),
("CUBLAS_DIAG_UNIT", ("rocblas_diagonal_unit", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_LEFT", ("rocblas_side_left", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_RIGHT", ("rocblas_side_right", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUBLAS_POINTER_MODE_HOST",
("rocblas_pointer_mode_host", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_POINTER_MODE_DEVICE",
("rocblas_pointer_mode_device", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_ATOMICS_NOT_ALLOWED",
(
"rocblas_atomics_not_allowed",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_ATOMICS_ALLOWED",
(
"rocblas_atomics_allowed",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_FLOAT",
(
"rocblas_precision_float",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_DOUBLE",
(
"rocblas_precision_double",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_HALF",
("rocblas_precision_half", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED),
),
(
"CUBLAS_DATA_INT8",
("rocblas_precision_int8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCreate", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasSetVector", ("rocblas_set_vector", CONV_MATH_FUNC, API_BLAS)),
("cublasGetVector", ("rocblas_get_vector", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSetVectorAsync",
("rocblas_set_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetVectorAsync",
("rocblas_get_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSetMatrix", ("rocblas_set_matrix", CONV_MATH_FUNC, API_BLAS)),
("cublasGetMatrix", ("rocblas_get_matrix", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetMatrixAsync",
("rocblas_get_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetMatrixAsync",
("rocblas_set_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasXerbla", ("rocblas_xerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSnrm2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasScnrm2", ("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDznrm2", ("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasNrm2Ex",
("rocblas_nrm2_ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSdot", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSdotBatched",
("rocblas_sdot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDdot", ("rocblas_ddot", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDdotBatched",
("rocblas_ddot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCdotu", ("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS)),
("cublasCdotc", ("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS)),
("cublasZdotu", ("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS)),
("cublasZdotc", ("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS)),
("cublasSscal", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSscalBatched",
("rocblas_sscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDscal", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDscalBatched",
("rocblas_dscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCscal", ("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsscal", ("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZscal", ("rocblas_zscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdscal", ("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSaxpyBatched",
("rocblas_saxpy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDaxpy", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasCaxpy", ("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZaxpy", ("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScopy", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScopyBatched",
("rocblas_scopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDcopy", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDcopyBatched",
("rocblas_dcopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCcopy", ("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZcopy", ("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSswap", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)),
("cublasCswap", ("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZswap", ("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamax", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamax", ("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamax", ("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamin", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamin", ("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamin", ("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSasum", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSasumBatched",
("rocblas_sasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDasum", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDasumBatched",
("rocblas_dasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScasum", ("rocblas_scasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDzasum", ("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrot", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsrot", ("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrot", ("rocblas_zrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdrot", ("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotg", ("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotg", ("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrotg", ("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrotg", ("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotm", ("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotm", ("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotmg", ("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotmg", ("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemv", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSgemvBatched",
("rocblas_sgemv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDgemv", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemv", ("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemv", ("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgbmv", ("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgbmv", ("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgbmv", ("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgbmv", ("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmv", ("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmv", ("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmv", ("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmv", ("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbmv", ("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbmv", ("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbmv", ("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbmv", ("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpmv", ("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpmv", ("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpmv", ("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpmv", ("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsv", ("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsv", ("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsv", ("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsv", ("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpsv", ("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpsv", ("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpsv", ("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpsv", ("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbsv", ("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbsv", ("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbsv", ("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbsv", ("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymv", ("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymv", ("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymv", ("rocblas_csymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymv", ("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemv", ("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemv", ("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsbmv", ("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsbmv", ("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChbmv", ("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhbmv", ("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspmv", ("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspmv", ("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpmv", ("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpmv", ("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSger", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)),
("cublasDger", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeru", ("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgerc", ("rocblas_cgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeru", ("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgerc", ("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS)),
("cublasDsyr", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS)),
("cublasCher", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2", ("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2", ("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2", ("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2", ("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr2", ("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr2", ("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr2", ("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr2", ("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSgemmBatched",
("rocblas_sgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgemmBatched",
("rocblas_dgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasHgemmBatched",
("rocblas_hgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgemmStridedBatched",
("rocblas_sgemm_strided_batched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasDgemmStridedBatched",
("rocblas_dgemm_strided_batched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasHgemmStridedBatched",
("rocblas_hgemm_strided_batched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasCgemmBatched",
("rocblas_cgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3mBatched",
("rocblas_cgemm_3m_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemmBatched",
("rocblas_zgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemmStridedBatched",
(
"rocblas_cgemm_strided_batched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasCgemm3mStridedBatched",
(
"rocblas_cgemm_3m_strided_batched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasZgemmStridedBatched",
(
"rocblas_zgemm_strided_batched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasHgemmStridedBatched",
(
"rocblas_hgemm_strided_batched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
("cublasSgemm", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemm", ("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasZgemm", ("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasHgemm", ("rocblas_hgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasSsyrk", ("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrk", ("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrk", ("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrk", ("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherk", ("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherk", ("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2k", ("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2k", ("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr2k", ("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr2k", ("rocblas_zyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyrkx", ("rocblas_ssyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrkx", ("rocblas_dsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrkx", ("rocblas_csyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrkx", ("rocblas_zsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2k", ("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2k", ("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherkx", ("rocblas_cherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherkx", ("rocblas_zherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymm", ("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymm", ("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymm", ("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymm", ("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemm", ("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemm", ("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsm", ("rocblas_strsm", CONV_MATH_FUNC, API_BLAS)),
("cublasDtrsm", ("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS)),
("cublasCtrsm", ("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsm", ("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasStrsmBatched",
("rocblas_strsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsmBatched",
("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsmBatched",
("rocblas_ctrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsmBatched",
("rocblas_ztrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasStrmm", ("rocblas_strmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmm", ("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmm", ("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmm", ("rocblas_ztrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgeam", ("rocblas_sgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasDgeam", ("rocblas_dgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeam", ("rocblas_cgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeam", ("rocblas_zgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSgetrfBatched",
("rocblas_sgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetrfBatched",
("rocblas_dgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetrfBatched",
("rocblas_cgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetrfBatched",
("rocblas_zgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgetriBatched",
("rocblas_sgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetriBatched",
("rocblas_dgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetriBatched",
("rocblas_cgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetriBatched",
("rocblas_zgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgetrsBatched",
("rocblas_sgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetrsBatched",
("rocblas_dgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetrsBatched",
("rocblas_cgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetrsBatched",
("rocblas_zgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSmatinvBatched",
("rocblas_smatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDmatinvBatched",
("rocblas_dmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCmatinvBatched",
("rocblas_cmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZmatinvBatched",
("rocblas_zmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgeqrfBatched",
("rocblas_sgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgeqrfBatched",
("rocblas_dgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgeqrfBatched",
("rocblas_cgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgeqrfBatched",
("rocblas_zgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgelsBatched",
("rocblas_sgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgelsBatched",
("rocblas_dgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgelsBatched",
("rocblas_cgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgelsBatched",
("rocblas_zgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSdgmm", ("rocblas_sdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDdgmm", ("rocblas_ddgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdgmm", ("rocblas_cdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdgmm", ("rocblas_zdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpttr", ("rocblas_stpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpttr", ("rocblas_dtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpttr", ("rocblas_ctpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpttr", ("rocblas_ztpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrttp", ("rocblas_strttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrttp", ("rocblas_dtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrttp", ("rocblas_ctrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrttp", ("rocblas_ztrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCreate_v2", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy_v2", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetVersion_v2",
("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSetStream", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasSetStream_v2", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream_v2", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetPointerMode",
("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasSetPointerMode",
("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasGetPointerMode_v2",
("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasSetPointerMode_v2",
("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS),
),
("cublasSgemv_v2", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemv_v2", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgemv_v2",
("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemv_v2",
("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgbmv_v2",
("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgbmv_v2",
("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgbmv_v2",
("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgbmv_v2",
("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrmv_v2",
("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrmv_v2",
("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrmv_v2",
("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrmv_v2",
("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStbmv_v2",
("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtbmv_v2",
("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtbmv_v2",
("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtbmv_v2",
("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStpmv_v2",
("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtpmv_v2",
("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtpmv_v2",
("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtpmv_v2",
("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrsv_v2",
("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsv_v2",
("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsv_v2",
("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsv_v2",
("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStpsv_v2",
("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtpsv_v2",
("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtpsv_v2",
("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtpsv_v2",
("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStbsv_v2",
("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtbsv_v2",
("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtbsv_v2",
("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtbsv_v2",
("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsymv_v2",
("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsymv_v2",
("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsymv_v2",
("rocblas_csymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsymv_v2",
("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChemv_v2",
("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhemv_v2",
("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsbmv_v2",
("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsbmv_v2",
("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChbmv_v2",
("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhbmv_v2",
("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSspmv_v2",
("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDspmv_v2",
("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChpmv_v2",
("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhpmv_v2",
("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSger_v2", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)),
("cublasDger_v2", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgeru_v2",
("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgerc_v2",
("rocblas_cergc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgeru_v2",
("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgerc_v2",
("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSsyr_v2", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr_v2", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr_v2", ("rocblas_csyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr_v2", ("rocblas_zsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher_v2", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher_v2", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr_v2", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr_v2", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr_v2", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr_v2", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSsyr2_v2",
("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyr2_v2",
("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyr2_v2",
("rocblas_csyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyr2_v2",
("rocblas_zsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCher2_v2",
("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZher2_v2",
("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSspr2_v2",
("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDspr2_v2",
("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChpr2_v2",
("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhpr2_v2",
("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSgemm_v2", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm_v2", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgemm_v2",
("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3m",
("rocblas_cgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3mEx",
("rocblas_cgemm_3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemm_v2",
("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemm3m",
("rocblas_zgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
# NB: The function rocblas_sgemmex doesn't actually exist in
# rocblas, as of 2018-12-05
(
"cublasSgemmEx",
("rocblas_sgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasGemmEx", ("rocblas_gemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasCgemmEx",
("rocblas_cgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasUint8gemmBias",
("rocblas_uint8gemmbias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsyrk_v2",
("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyrk_v2",
("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrk_v2",
("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyrk_v2",
("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrkEx",
("rocblas_csyrkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrk3mEx",
("rocblas_csyrk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherk_v2",
("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherkEx",
("rocblas_cherkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherk3mEx",
("rocblas_cherk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZherk_v2",
("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsyr2k_v2",
("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyr2k_v2",
("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyr2k_v2",
("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyr2k_v2",
("rocblas_zsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCher2k_v2",
("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZher2k_v2",
("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsymm_v2",
("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsymm_v2",
("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsymm_v2",
("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsymm_v2",
("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChemm_v2",
("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhemm_v2",
("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrsm_v2",
("rocblas_strsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsm_v2",
("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsm_v2",
("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsm_v2",
("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrmm_v2",
("rocblas_strmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrmm_v2",
("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrmm_v2",
("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrmm_v2",
("rocblas_ztrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSnrm2_v2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2_v2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScnrm2_v2",
("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDznrm2_v2",
("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDotEx", ("rocblas_dotex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDotcEx", ("rocblas_dotcex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSdot_v2", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)),
("cublasDdot_v2", ("rocblas_ddot", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCdotu_v2",
("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCdotc_v2",
("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdotu_v2",
("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdotc_v2",
("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScalEx", ("rocblas_scalex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSscal_v2", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)),
("cublasDscal_v2", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCscal_v2",
("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsscal_v2",
("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZscal_v2",
("rocblas_zcsal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdscal_v2",
("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasAxpyEx", ("rocblas_axpyex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy_v2", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasDaxpy_v2", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCaxpy_v2",
("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZaxpy_v2",
("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScopy_v2", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)),
("cublasDcopy_v2", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCcopy_v2",
("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZcopy_v2",
("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSswap_v2", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap_v2", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCswap_v2",
("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZswap_v2",
("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasIsamax_v2", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax_v2", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)),
(
"cublasIcamax_v2",
("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasIzamax_v2",
("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasIsamin_v2", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin_v2", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)),
(
"cublasIcamin_v2",
("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasIzamin_v2",
("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSasum_v2", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)),
("cublasDasum_v2", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScasum_v2",
("rocblas_scasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDzasum_v2",
("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSrot_v2", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot_v2", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot_v2", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasCsrot_v2",
("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasZrot_v2", ("rocblas_zrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasZdrot_v2",
("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotg_v2",
("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotg_v2",
("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCrotg_v2",
("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZrotg_v2",
("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotm_v2",
("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotm_v2",
("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotmg_v2",
("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotmg_v2",
("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"CURAND_STATUS_SUCCESS",
("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_VERSION_MISMATCH",
("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_NOT_INITIALIZED",
("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_ALLOCATION_FAILED",
("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_TYPE_ERROR",
("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_OUT_OF_RANGE",
("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_LENGTH_NOT_MULTIPLE",
("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_DOUBLE_PRECISION_REQUIRED",
(
"HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED",
CONV_NUMERIC_LITERAL,
API_RAND,
),
),
(
"CURAND_STATUS_LAUNCH_FAILURE",
("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_PREEXISTING_FAILURE",
("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_INITIALIZATION_FAILED",
("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_ARCH_MISMATCH",
("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_INTERNAL_ERROR",
("HIPRAND_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_RAND),
),
("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)),
(
"mtgp32dc_params_fast_11213",
("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_DEFAULT",
("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_XORWOW",
("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MRG32K3A",
("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MTGP32",
("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MT19937",
("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_PHILOX4_32_10",
("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_DEFAULT",
("HIPRAND_RNG_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SOBOL32",
("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SCRAMBLED_SOBOL32",
("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SOBOL64",
("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SCRAMBLED_SOBOL64",
("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"curand_ORDERING_PSEUDO_BEST",
(
"HIPRAND_ORDERING_PSEUDO_BEST",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_PSEUDO_DEFAULT",
(
"HIPRAND_ORDERING_PSEUDO_DEFAULT",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_PSEUDO_SEEDED",
(
"HIPRAND_ORDERING_PSEUDO_SEEDED",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_QUASI_DEFAULT",
(
"HIPRAND_ORDERING_QUASI_DEFAULT",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_DIRECTION_VECTORS_32_JOEKUO6",
(
"HIPRAND_DIRECTION_VECTORS_32_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6",
(
"HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_DIRECTION_VECTORS_64_JOEKUO6",
(
"HIPRAND_DIRECTION_VECTORS_64_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6",
(
"HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_CHOOSE_BEST",
("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_ITR",
("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_KNUTH",
("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_HITR",
("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
(
"curand_BINARY_SEARCH",
("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DISCRETE_GAUSS",
("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_REJECTION",
("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DEVICE_API",
("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_FAST_REJECTION",
("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_3RD",
("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DEFINITION",
("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_POISSON",
("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)),
(
"curandCreateGeneratorHost",
("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND),
),
(
"curandCreatePoissonDistribution",
("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND),
),
(
"curandDestroyDistribution",
("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND),
),
(
"curandDestroyGenerator",
("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND),
),
("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateLogNormal",
("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND),
),
(
"curandGenerateLogNormalDouble",
("hiprandGenerateLogNormalDouble", CONV_MATH_FUNC, API_RAND),
),
(
"curandGenerateLongLong",
("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateNormalDouble",
("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND),
),
("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)),
("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)),
("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateUniformDouble",
("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND),
),
(
"curandGetDirectionVectors32",
("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetDirectionVectors64",
("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetProperty",
("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetScrambleConstants32",
(
"hiprandGetScrambleConstants32",
CONV_MATH_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curandGetScrambleConstants64",
(
"hiprandGetScrambleConstants64",
CONV_MATH_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)),
(
"curandSetGeneratorOffset",
("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND),
),
(
"curandSetGeneratorOrdering",
("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandSetPseudoRandomGeneratorSeed",
("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND),
),
(
"curandSetQuasiRandomGeneratorDimensions",
("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND),
),
("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)),
("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)),
("curand4", ("hiprand4", CONV_DEVICE_FUNC, API_RAND)),
("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal_double",
("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal2_double",
("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal4_double",
("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND),
),
(
"curand_mtgp32_single",
("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_mtgp32_single_specific",
(
"hiprand_mtgp32_single_specific",
CONV_DEVICE_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_mtgp32_specific",
("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)),
(
"curandMakeMTGP32Constants",
("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND),
),
(
"curandMakeMTGP32KernelState",
("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND),
),
("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_normal2_double",
("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_normal4_double",
("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_uniform_double",
("hiprand_uniform_double", CONV_DEVICE_FUNC, API_RAND),
),
(
"curand_uniform2_double",
("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_uniform4_double",
("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)),
("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_Philox4x32_10",
("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)),
("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUFFT_COMPATIBILITY_DEFAULT",
(
"HIPFFT_COMPATIBILITY_DEFAULT",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
("cuComplex", ("rocblas_float_complex", CONV_TYPE, API_BLAS)),
("cuDoubleComplex", ("rocblas_double_complex", CONV_TYPE, API_BLAS)),
("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)),
("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)),
("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_INVALID_VALUE",
("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INTERNAL_ERROR",
("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT),
),
("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_UNALIGNED_DATA",
("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INCOMPLETE_PARAMETER_LIST",
("HIPFFT_INCOMPLETE_PARAMETER_LIST", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INVALID_DEVICE",
("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT),
),
("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_NOT_IMPLEMENTED",
("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_LICENSE_ERROR",
("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED),
),
(
"CUFFT_NOT_SUPPORTED",
("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT),
),
("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)),
("cufftType", ("hipfftType", CONV_TYPE, API_FFT)),
("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)),
(
"cufftCompatibility_t",
("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED),
),
(
"cufftCompatibility",
("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED),
),
(
"CUFFT_COMPATIBILITY_FFTW_PADDING",
(
"HIPFFT_COMPATIBILITY_FFTW_PADDING",
CONV_NUMERIC_LITERAL,
API_FFT,
HIP_UNSUPPORTED,
),
),
("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)),
("cufftDoubleReal", ("hipfftDoubleReal", CONV_TYPE, API_FFT)),
("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)),
("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)),
("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)),
("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)),
("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize", ("hipfftGetSize", CONV_MATH_FUNC, API_FFT)),
("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)),
(
"cufftSetAutoAllocation",
("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT),
),
("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)),
("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)),
("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)),
("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)),
(
"cufftGetProperty",
("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED),
),
("nvrtcResult", ("hiprtcResult", CONV_TYPE, API_RTC)),
("NVRTC_SUCCESS", ("HIPRTC_SUCCESS", CONV_TYPE, API_RTC)),
(
"NVRTC_ERROR_OUT_OF_MEMORY",
("HIPRTC_ERROR_OUT_OF_MEMORY", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_PROGRAM_CREATION_FAILURE",
("HIPRTC_ERROR_PROGRAM_CREATION_FAILURE", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INVALID_INPUT",
("HIPRTC_ERROR_INVALID_INPUT", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INVALID_PROGRAM",
("HIPRTC_ERROR_INVALID_PROGRAM", CONV_TYPE, API_RTC),
),
("NVRTC_ERROR_COMPILATION", ("HIPRTC_ERROR_COMPILATION", CONV_TYPE, API_RTC)),
(
"NVRTC_ERROR_BUILTIN_OPERATION_FAILURE",
("HIPRTC_ERROR_BUILTIN_OPERATION_FAILURE", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION",
("HIPRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID",
("HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INTERNAL_ERROR",
("HIPRTC_ERROR_INTERNAL_ERROR", CONV_TYPE, API_RTC),
),
("nvrtcGetErrorString", ("hiprtcGetErrorString", CONV_JIT, API_RTC)),
("nvrtcVersion", ("hiprtcVersion", CONV_JIT, API_RTC)),
("nvrtcProgram", ("hiprtcProgram", CONV_TYPE, API_RTC)),
("nvrtcAddNameExpression", ("hiprtcAddNameExpression", CONV_JIT, API_RTC)),
("nvrtcCompileProgram", ("hiprtcCompileProgram", CONV_JIT, API_RTC)),
("nvrtcCreateProgram", ("hiprtcCreateProgram", CONV_JIT, API_RTC)),
("nvrtcDestroyProgram", ("hiprtcDestroyProgram", CONV_JIT, API_RTC)),
("nvrtcGetLoweredName", ("hiprtcGetLoweredName", CONV_JIT, API_RTC)),
("nvrtcGetProgramLog", ("hiprtcGetProgramLog", CONV_JIT, API_RTC)),
("nvrtcGetProgramLogSize", ("hiprtcGetProgramLogSize", CONV_JIT, API_RTC)),
("nvrtcGetPTX", ("hiprtcGetCode", CONV_JIT, API_RTC)),
("nvrtcGetPTXSize", ("hiprtcGetCodeSize", CONV_JIT, API_RTC)),
("thrust::cuda", ("thrust::hip", CONV_MATH_FUNC, API_BLAS)),
(
"cudaCpuDeviceId",
("hipCpuDeviceId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
# The caffe2 directory does a string match; pytorch does a word-boundary match.
# Patterns such as 'cub::' will not match for pytorch.
# We list all current uses of cub symbols for this reason
# (the sketch immediately after this mapping table illustrates the difference).
("cub::", ("hipcub::", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgMax", ("hipcub::ArgMax", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgMin", ("hipcub::ArgMin", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_REDUCE_WARP_REDUCTIONS", ("hipcub::BLOCK_REDUCE_WARP_REDUCTIONS", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockReduce", ("hipcub::BlockReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceReduce", ("hipcub::DeviceReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceRunLengthEncode", ("hipcub::DeviceRunLengthEncode", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceScan", ("hipcub::DeviceScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSegmentedRadixSort", ("hipcub::DeviceSegmentedRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSegmentedReduce", ("hipcub::DeviceSegmentedReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSelect", ("hipcub::DeviceSelect", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::KeyValuePair", ("hipcub::KeyValuePair", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)),
("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)),
("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)),
("nvtxRangePop", ("roctxRangePop", CONV_OTHER, API_ROCTX)),
("nvtxRangeStartA", ("roctxRangeStartA", CONV_OTHER, API_ROCTX)),
("nvtxRangeEnd", ("roctxRangeStop", CONV_OTHER, API_ROCTX)),
]
)
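# Illustrative sketch only, not used by the mappings above: a hypothetical
# approximation of the matching difference noted next to the cub:: entries.
# A plain substring rewrite (caffe2-style) fires on the bare "cub::" prefix,
# while a word-boundary-style rewrite does not, so every cub symbol has to be
# listed explicitly. The helper name and the exact regex are assumptions.
def _cub_rewrite_sketch(src):
    import re
    # Plain string match: the bare prefix is enough to rewrite the call.
    substring_result = src.replace("cub::", "hipcub::")
    # Boundary lookarounds: "cub::" alone does not match inside "cub::ArgMax"
    # (the 'A' after '::' fails the trailing lookahead), so only the explicit
    # "cub::ArgMax" entry rewrites it.
    boundary_result = re.sub(r"(?<!\w)cub::(?!\w)", "hipcub::", src)
    boundary_result = re.sub(r"(?<!\w)cub::ArgMax(?!\w)", "hipcub::ArgMax", boundary_result)
    return substring_result, boundary_result

# _cub_rewrite_sketch("cub::ArgMax()") returns
# ("hipcub::ArgMax()", "hipcub::ArgMax()"); drop the explicit ArgMax rule and
# the boundary variant leaves the source untouched.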
CUDA_SPARSE_MAP = collections.OrderedDict(
[
("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPARSE)),
("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPARSE)),
("cuComplex", ("hipComplex", CONV_TYPE, API_SPARSE)),
("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_SPARSE)),
(
"CUSPARSE_POINTER_MODE_HOST",
("HIPSPARSE_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_SPARSE),
),
("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE, API_SPARSE)),
(
"cusparseCreateMatDescr",
("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseCreate", ("hipsparseCreate", CONV_MATH_FUNC, API_SPARSE)),
(
"cusparseDestroyMatDescr",
("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDiagType_t", ("hipsparseDiagType_t", CONV_TYPE, API_SPARSE)),
("CUSPARSE_DIAG_TYPE_UNIT", ("HIPSPARSE_DIAG_TYPE_UNIT", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_DIAG_TYPE_NON_UNIT", ("HIPSPARSE_DIAG_TYPE_NON_UNIT", CONV_NUMERIC_LITERAL, API_SPARSE)),
("cusparseSetMatDiagType", ("hipsparseSetMatDiagType", CONV_MATH_FUNC, API_SPARSE)),
("cusparseFillMode_t", ("hipsparseFillMode_t", CONV_TYPE, API_SPARSE)),
("CUSPARSE_FILL_MODE_UPPER", ("HIPSPARSE_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_FILL_MODE_LOWER", ("HIPSPARSE_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPARSE)),
("cusparseSetMatFillMode", ("hipsparseSetMatFillMode", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDirection_t", ("hipsparseDirection_t", CONV_TYPE, API_SPARSE)),
("CUSPARSE_DIRECTION_ROW", ("HIPSPARSE_DIRECTION_ROW", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_DIRECTION_COLUMN", ("HIPSPARSE_DIRECTION_COLUMN", CONV_NUMERIC_LITERAL, API_SPARSE)),
("cusparseSolvePolicy_t", ("hipsparseSolvePolicy_t", CONV_TYPE, API_SPARSE)),
("CUSPARSE_SOLVE_POLICY_NO_LEVEL", ("HIPSPARSE_SOLVE_POLICY_NO_LEVEL", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_SOLVE_POLICY_USE_LEVEL", ("HIPSPARSE_SOLVE_POLICY_USE_LEVEL", CONV_NUMERIC_LITERAL, API_SPARSE)),
("cusparseCreateBsrsv2Info", ("hipsparseCreateBsrsv2Info", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateBsrsm2Info", ("hipsparseCreateBsrsm2Info", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroyBsrsv2Info", ("hipsparseDestroyBsrsv2Info", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroyBsrsm2Info", ("hipsparseDestroyBsrsm2Info", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrmm", ("hipsparseSbsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrmm", ("hipsparseDbsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrmm", ("hipsparseCbsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrmm", ("hipsparseZbsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrmv", ("hipsparseSbsrmv", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrmv", ("hipsparseDbsrmv", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrmv", ("hipsparseCbsrmv", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrmv", ("hipsparseZbsrmv", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsv2_bufferSize", ("hipsparseSbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsv2_bufferSize", ("hipsparseDbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsv2_bufferSize", ("hipsparseCbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsv2_bufferSize", ("hipsparseZbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsv2_analysis", ("hipsparseSbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsv2_analysis", ("hipsparseDbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsv2_analysis", ("hipsparseCbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsv2_analysis", ("hipsparseZbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsv2_solve", ("hipsparseSbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsv2_solve", ("hipsparseDbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsv2_solve", ("hipsparseCbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsv2_solve", ("hipsparseZbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsm2_bufferSize", ("hipsparseSbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsm2_bufferSize", ("hipsparseDbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsm2_bufferSize", ("hipsparseCbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsm2_bufferSize", ("hipsparseZbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsm2_analysis", ("hipsparseSbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsm2_analysis", ("hipsparseDbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsm2_analysis", ("hipsparseCbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsm2_analysis", ("hipsparseZbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSbsrsm2_solve", ("hipsparseSbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDbsrsm2_solve", ("hipsparseDbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCbsrsm2_solve", ("hipsparseCbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZbsrsm2_solve", ("hipsparseZbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCcsrmm2", ("hipsparseCcsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZcsrmm2", ("hipsparseZcsrmm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPARSE)),
(
"cusparseXcsrsort_bufferSizeExt",
("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseCreateCsrgemm2Info", ("hipsparseCreateCsrgemm2Info", CONV_MATH_FUNC, API_SPARSE)),
(
"cusparseDestroyCsrgemm2Info",
("hipsparseDestroyCsrgemm2Info", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseXcsrgemm2Nnz", ("hipsparseXcsrgemm2Nnz", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrgemm2_bufferSizeExt", ("hipsparseDcsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrgemm2_bufferSizeExt", ("hipsparseScsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrgemm2", ("hipsparseDcsrgemm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrgemm2", ("hipsparseScsrgemm2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSetPointerMode", ("hipsparseSetPointerMode", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcsrgeam2Nnz", ("hipsparseXcsrgeam2Nnz", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrgeam2_bufferSizeExt", ("hipsparseScsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrgeam2_bufferSizeExt", ("hipsparseDcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCcsrgeam2_bufferSizeExt", ("hipsparseCcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZcsrgeam2_bufferSizeExt", ("hipsparseZcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)),
("cusparseScsrgeam2", ("hipsparseScsrgeam2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDcsrgeam2", ("hipsparseDcsrgeam2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCcsrgeam2", ("hipsparseCcsrgeam2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseZcsrgeam2", ("hipsparseZcsrgeam2", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXbsrsm2_zeroPivot", ("hipsparseXbsrsm2_zeroPivot", CONV_MATH_FUNC, API_SPARSE)),
("cusparseXbsrsv2_zeroPivot", ("hipsparseXbsrsv2_zeroPivot", CONV_MATH_FUNC, API_SPARSE)),
(
"cusparseXcoosort_bufferSizeExt",
("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE),
),
(
"cusparseXcoosortByRow",
("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPARSE)),
(
"cusparseCreateIdentityPermutation",
("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPARSE),
),
(
"cusparseSetMatIndexBase",
("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPARSE),
),
("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSpMV", ("hipsparseSpMV", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSpMV_bufferSize", ("hipsparseSpMV_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSpMM", ("hipsparseSpMM", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSpMM_bufferSize", ("hipsparseSpMM_bufferSize", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateDnMat", ("hipsparseCreateDnMat", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCsrSetStridedBatch", ("hipsparseCsrSetStridedBatch", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateDnVec", ("hipsparseCreateDnVec", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroyDnMat", ("hipsparseDestroyDnMat", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroyDnVec", ("hipsparseDestroyDnVec", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDestroySpMat", ("hipsparseDestroySpMat", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateCoo", ("hipsparseCreateCoo", CONV_MATH_FUNC, API_SPARSE)),
("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPARSE)),
("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPARSE)),
("cusparseSpMVAlg_t", ("hipsparseSpMVAlg_t", CONV_TYPE, API_SPARSE)),
("cusparseSpMMAlg_t", ("hipsparseSpMMAlg_t", CONV_TYPE, API_SPARSE)),
("cusparseIndexType_t", ("hipsparseIndexType_t", CONV_TYPE, API_SPARSE)),
# Unsupported ("cusparseDnMatDescr", ("hipsparseDnMatDescr", CONV_TYPE, API_SPARSE)),
# Unsupported ("cusparseDnVecDescr", ("hipsparseDnVecDescr", CONV_TYPE, API_SPARSE)),
# Unsupported ("cusparseSpMatDescr", ("hipsparseSpMatDescr", CONV_TYPE, API_SPARSE)),
("cusparseDnMatDescr_t", ("hipsparseDnMatDescr_t", CONV_TYPE, API_SPARSE)),
("cusparseDnVecDescr_t", ("hipsparseDnVecDescr_t", CONV_TYPE, API_SPARSE)),
("cusparseSpMatDescr_t", ("hipsparseSpMatDescr_t", CONV_TYPE, API_SPARSE)),
("CUSPARSE_INDEX_32I", ("HIPSPARSE_INDEX_32I", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_INDEX_64I", ("HIPSPARSE_INDEX_64I", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_ORDER_COL", ("HIPSPARSE_ORDER_COLUMN", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_MV_ALG_DEFAULT", ("HIPSPARSE_MV_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_MM_ALG_DEFAULT", ("HIPSPARSE_MM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_COOMM_ALG1", ("HIPSPARSE_COOMM_ALG1", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_COOMM_ALG2", ("HIPSPARSE_COOMM_ALG2", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_COOMM_ALG3", ("HIPSPARSE_COOMM_ALG3", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_COOMV_ALG", ("HIPSPARSE_COOMV_ALG", CONV_NUMERIC_LITERAL, API_SPARSE)),
("CUSPARSE_CSRMM_ALG1", ("HIPSPARSE_CSRMM_ALG1", CONV_NUMERIC_LITERAL, API_SPARSE)),
(
"CUSPARSE_STATUS_SUCCESS",
("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_NOT_INITIALIZED",
("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_ALLOC_FAILED",
("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_INVALID_VALUE",
("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_MAPPING_ERROR",
("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_EXECUTION_FAILED",
("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_INTERNAL_ERROR",
("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED",
(
"HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED",
CONV_NUMERIC_LITERAL,
API_SPARSE,
),
),
(
"CUSPARSE_STATUS_ARCH_MISMATCH",
("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_STATUS_ZERO_PIVOT",
("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_OPERATION_TRANSPOSE",
("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_OPERATION_NON_TRANSPOSE",
("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE",
(
"HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE",
CONV_NUMERIC_LITERAL,
API_SPARSE,
),
),
(
"CUSPARSE_INDEX_BASE_ZERO",
("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_INDEX_BASE_ONE",
("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPARSE),
),
(
"CUSPARSE_MATRIX_TYPE_GENERAL",
("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPARSE),
),
]
)
PYTORCH_SPECIFIC_MAPPINGS = collections.OrderedDict(
[
("USE_CUDA", ("USE_ROCM", API_PYTORCH)),
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)),
("cudaHostAllocator", ("hipHostAllocator", API_PYTORCH)),
("cudaDeviceAllocator", ("hipDeviceAllocator", API_PYTORCH)),
("define MAX_NUM_BLOCKS 200", ("define MAX_NUM_BLOCKS 64", API_PYTORCH)),
("cuda::CUDAGuard", ("hip::HIPGuardMasqueradingAsCUDA", API_PYTORCH)),
("CUDAGuard", ("HIPGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::OptionalCUDAGuard",
("hip::OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH),
),
("OptionalCUDAGuard", ("OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::CUDAStreamGuard",
("hip::HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
("CUDAStreamGuard", ("HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::OptionalCUDAStreamGuard",
("hip::OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"OptionalCUDAStreamGuard",
("OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::CUDAMultiStreamGuard",
("hip::HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"CUDAMultiStreamGuard",
("HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
# Only get() needs to be transformed this way; all the other functions can go
# straight to the normal hip::HIPCachingAllocator versions.
(
"cuda::CUDACachingAllocator::get",
("hip::HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH),
),
(
"CUDACachingAllocator::get",
("HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH),
),
(
"cuda::CUDACachingAllocator::recordStream",
(
"hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
(
"CUDACachingAllocator::recordStream",
(
"HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
("cuda::CUDAStream", ("hip::HIPStreamMasqueradingAsCUDA", API_PYTORCH)),
("CUDAStream", ("HIPStreamMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::getStreamFromPool",
("hip::getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH),
),
("getStreamFromPool", ("getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::getDefaultCUDAStream",
("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::getStreamFromExternal",
("hip::getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH),
),
("getStreamFromExternal", ("getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH)),
(
"getDefaultCUDAStream",
("getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::getCurrentCUDAStream",
("hip::getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"getCurrentCUDAStream",
("getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::setCurrentCUDAStream",
("hip::setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"setCurrentCUDAStream",
("setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
# TODO: Undo this special-case; see the header for motivation behind this
# hack. It's VERY important this is only applied to PyTorch HIPify.
(
"c10/cuda/CUDAGuard.h",
("ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h", API_PYTORCH),
),
(
"c10/cuda/CUDACachingAllocator.h",
("ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h", API_PYTORCH),
),
(
"c10/cuda/CUDAStream.h",
("ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h", API_PYTORCH),
),
("gloo/cuda.h", ("gloo/hip.h", API_PYTORCH)),
(
"gloo/cuda_allreduce_halving_doubling.h",
("gloo/hip_allreduce_halving_doubling.h", API_PYTORCH),
),
(
"gloo/cuda_allreduce_halving_doubling_pipelined.h",
("gloo/hip_allreduce_halving_doubling_pipelined.h", API_PYTORCH),
),
("gloo/cuda_allreduce_ring.h", ("gloo/hip_allreduce_ring.h", API_PYTORCH)),
(
"gloo/cuda_broadcast_one_to_all.h",
("gloo/hip_broadcast_one_to_all.h", API_PYTORCH),
),
(
"gloo::CudaAllreduceHalvingDoublingPipelined",
("gloo::HipAllreduceHalvingDoublingPipelined", API_PYTORCH),
),
("gloo::CudaBroadcastOneToAll", ("gloo::HipBroadcastOneToAll", API_PYTORCH)),
("gloo::CudaHostWorkspace", ("gloo::HipHostWorkspace", API_PYTORCH)),
("gloo::CudaDeviceWorkspace", ("gloo::HipDeviceWorkspace", API_PYTORCH)),
("CUDNN_RNN_RELU", ("miopenRNNRELU", API_PYTORCH)),
("CUDNN_RNN_TANH", ("miopenRNNTANH", API_PYTORCH)),
("CUDNN_LSTM", ("miopenLSTM", API_PYTORCH)),
("CUDNN_GRU", ("miopenGRU", API_PYTORCH)),
("cudnnRNNMode_t", ("miopenRNNMode_t", API_PYTORCH)),
("magma_queue_create_from_cuda", ("magma_queue_create_from_hip", API_PYTORCH)),
]
)
CAFFE2_SPECIFIC_MAPPINGS = collections.OrderedDict(
[
("cuda_stream", ("hip_stream", API_CAFFE2)),
# If the header already lives in a native hip folder (under a hip directory),
# there is no need to add a hip path to it; the trie in the hipify script
# honors this mapping order, so the identity entry forbids any further
# replacement (see the ordering sketch after this table).
("/hip/", ("/hip/", API_CAFFE2)),
("/context_gpu", ("/hip/context_gpu", API_CAFFE2)),
("/common_gpu", ("/hip/common_gpu", API_CAFFE2)),
("/cuda_nccl_gpu", ("/hip/hip_nccl_gpu", API_CAFFE2)),
("/mixed_utils", ("/hip/mixed_utils", API_CAFFE2)),
("/operator_fallback_gpu", ("/hip/operator_fallback_gpu", API_CAFFE2)),
(
"/spatial_batch_norm_op_impl",
("/hip/spatial_batch_norm_op_impl", API_CAFFE2),
),
(
"/recurrent_network_executor_gpu",
("/hip/recurrent_network_executor_gpu", API_CAFFE2),
),
(
"/generate_proposals_op_util_nms_gpu",
("/hip/generate_proposals_op_util_nms_gpu", API_CAFFE2),
),
("/max_pool_with_index_gpu", ("/hip/max_pool_with_index_gpu", API_CAFFE2)),
("/THCCachingAllocator_gpu", ("/hip/THCCachingAllocator_gpu", API_CAFFE2)),
("/top_k_heap_selection", ("/hip/top_k_heap_selection", API_CAFFE2)),
("/top_k_radix_selection", ("/hip/top_k_radix_selection", API_CAFFE2)),
("/GpuAtomics", ("/hip/GpuAtomics", API_CAFFE2)),
("/GpuDefs", ("/hip/GpuDefs", API_CAFFE2)),
("/GpuScanUtils", ("/hip/GpuScanUtils", API_CAFFE2)),
("/GpuBitonicSort", ("/hip/GpuBitonicSort", API_CAFFE2)),
("/math/reduce.cuh", ("/math/hip/reduce.cuh", API_CAFFE2)),
("/sgd/adagrad_fused_op_gpu.cuh", ("/sgd/hip/adagrad_fused_op_gpu.cuh", API_CAFFE2)),
("/operators/segment_reduction_op_gpu.cuh", ("/operators/hip/segment_reduction_op_gpu.cuh", API_CAFFE2)),
("/gather_op.cuh", ("/hip/gather_op.cuh", API_CAFFE2)),
("caffe2/core/common_cudnn.h", ("caffe2/core/hip/common_miopen.h", API_CAFFE2)),
("REGISTER_CUDA_OPERATOR", ("REGISTER_HIP_OPERATOR", API_CAFFE2)),
("CUDA_1D_KERNEL_LOOP", ("HIP_1D_KERNEL_LOOP", API_CAFFE2)),
("CUDAContext", ("HIPContext", API_CAFFE2)),
("CAFFE_CUDA_NUM_THREADS", ("CAFFE_HIP_NUM_THREADS", API_CAFFE2)),
("HasCudaGPU", ("HasHipGPU", API_CAFFE2)),
("__expf", ("expf", API_CAFFE2)),
("CUBLAS_ENFORCE", ("ROCBLAS_ENFORCE", API_CAFFE2)),
("CUBLAS_CHECK", ("ROCBLAS_CHECK", API_CAFFE2)),
("cublas_handle", ("rocblashandle", API_CAFFE2)),
("CURAND_ENFORCE", ("HIPRAND_ENFORCE", API_CAFFE2)),
("CURAND_CHECK", ("HIPRAND_CHECK", API_CAFFE2)),
("curandGenerateUniform", ("hiprandGenerateUniform", API_CAFFE2)),
("curand_generator", ("hiprand_generator", API_CAFFE2)),
("CaffeCudaGetDevice", ("CaffeHipGetDevice", API_CAFFE2)),
# do not rename CUDA_KERNEL_ASSERT, lazyInitCUDA in caffe2 sources
# the ordered dict guarantees this pattern will match first, before "CUDA"
("CUDA_KERNEL_ASSERT", ("CUDA_KERNEL_ASSERT", API_CAFFE2)),
("lazyInitCUDA", ("lazyInitCUDA", API_CAFFE2)),
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_CAFFE2)),
("CUDA", ("HIP", API_CAFFE2)),
("Cuda", ("Hip", API_CAFFE2)),
("cuda_", ("hip_", API_CAFFE2)),
("_cuda", ("_hip", API_CAFFE2)),
("CUDNN", ("MIOPEN", API_CAFFE2)),
("CuDNN", ("MIOPEN", API_CAFFE2)),
("cudnn", ("miopen", API_CAFFE2)),
("namespace cuda", ("namespace hip", API_CAFFE2)),
("cuda::CUDAGuard", ("hip::HIPGuard", API_CAFFE2)),
("cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuard", API_CAFFE2)),
("cuda::CUDAStreamGuard", ("hip::HIPStreamGuard", API_CAFFE2)),
("cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuard", API_CAFFE2)),
("c10/cuda/CUDAGuard.h", ("c10/hip/HIPGuard.h", API_CAFFE2)),
("gloo/cuda", ("gloo/hip", API_CAFFE2)),
]
)
# We must tread very carefully here. Blanket conversions like those done
# in CAFFE2_SPECIFIC_MAPPINGS are not presently supported for PyTorch,
# because a regex for CUDA will also match a filename like CUDAGuard.h,
# but the HIPIFY script doesn't presently move the file and so the substitution
# will be invalid. Instead, we specifically list out every identifier
# and file from c10/cuda which may be used externally, and do substitutions this
# way.
#
# NB: if you want a transformation to ONLY apply to the c10/ directory,
# put it as API_CAFFE2
C10_MAPPINGS = collections.OrderedDict(
[
("cuda::compat::", ("hip::compat::", API_C10)),
("c10/cuda/CUDAException.h", ("c10/hip/HIPException.h", API_C10)),
("c10/cuda/CUDAMacros.h", ("c10/hip/HIPMacros.h", API_C10)),
("c10/cuda/CUDAMathCompat.h", ("c10/hip/HIPMathCompat.h", API_C10)),
("c10/cuda/CUDAFunctions.h", ("c10/hip/HIPFunctions.h", API_C10)),
("c10/cuda/CUDAMiscFunctions.h", ("c10/hip/HIPMiscFunctions.h", API_C10)),
("c10/cuda/CUDAStream.h", ("c10/hip/HIPStream.h", API_C10)),
("c10/cuda/CUDAGraphsC10Utils.h", ("c10/hip/HIPGraphsC10Utils.h", API_C10)),
("c10/cuda/CUDACachingAllocator.h", ("c10/hip/HIPCachingAllocator.h", API_C10)),
("c10/cuda/impl/CUDATest.h", ("c10/hip/impl/HIPTest.h", API_C10)),
("c10/cuda/impl/CUDAGuardImpl.h", ("c10/hip/impl/HIPGuardImpl.h", API_C10)),
(
"c10/cuda/impl/cuda_cmake_macros.h",
("c10/hip/impl/hip_cmake_macros.h", API_C10),
),
("C10_CUDA_CHECK", ("C10_HIP_CHECK", API_C10)),
("C10_CUDA_CHECK_WARN", ("C10_HIP_CHECK_WARN", API_C10)),
("c10::cuda", ("c10::hip", API_C10)),
("cuda::CUDAStream", ("hip::HIPStream", API_C10)),
("CUDAStream", ("HIPStream", API_C10)),
# This substitution is not permissible, because there's another copy of this
# function in torch/cuda.h
# ("cuda::device_count", ("hip::device_count", API_C10)),
("cuda::current_device", ("hip::current_device", API_C10)),
("cuda::set_device", ("hip::set_device", API_C10)),
("cuda::device_synchronize", ("hip::device_synchronize", API_C10)),
("cuda::getStreamFromPool", ("hip::getStreamFromPool", API_C10)),
("getStreamFromPool", ("getStreamFromPool", API_C10)),
("cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStream", API_C10)),
("getDefaultCUDAStream", ("getDefaultHIPStream", API_C10)),
("cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStream", API_C10)),
("getCurrentCUDAStream", ("getCurrentHIPStream", API_C10)),
("cuda::get_cuda_check_prefix", ("hip::get_cuda_check_prefix", API_C10)),
("cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStream", API_C10)),
("setCurrentCUDAStream", ("setCurrentHIPStream", API_C10)),
("cuda::CUDACachingAllocator", ("hip::HIPCachingAllocator", API_C10)),
("CUDACachingAllocator", ("HIPCachingAllocator", API_C10)),
("C10_CUDA_KERNEL_LAUNCH_CHECK", ("C10_HIP_KERNEL_LAUNCH_CHECK", API_C10))
]
)
# NB: C10 mappings are more specific than Caffe2 mappings, so run them
# first
CUDA_TO_HIP_MAPPINGS = [
CUDA_IDENTIFIER_MAP,
CUDA_TYPE_NAME_MAP,
CUDA_INCLUDE_MAP,
CUDA_SPARSE_MAP,
C10_MAPPINGS,
PYTORCH_SPECIFIC_MAPPINGS,
CAFFE2_SPECIFIC_MAPPINGS,
]
|
""" Constants for annotations in the mapping.
The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py.
They are based on
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h
and fall into three categories: 1) type of mapping, 2) API of mapping, 3) unsupported
mapping.
"""
CONV_VERSION = 0
CONV_INIT = 1
CONV_DEVICE = 2
CONV_MEM = 3
CONV_KERN = 4
CONV_COORD_FUNC = 5
CONV_MATH_FUNC = 6
CONV_DEVICE_FUNC = 7
CONV_SPECIAL_FUNC = 8
CONV_STREAM = 9
CONV_EVENT = 10
CONV_OCCUPANCY = 11
CONV_CONTEXT = 12
CONV_PEER = 13
CONV_MODULE = 14
CONV_CACHE = 15
CONV_EXEC = 16
CONV_ERROR = 17
CONV_DEF = 18
CONV_TEX = 19
CONV_GL = 20
CONV_GRAPHICS = 21
CONV_SURFACE = 22
CONV_JIT = 23
CONV_D3D9 = 24
CONV_D3D10 = 25
CONV_D3D11 = 26
CONV_VDPAU = 27
CONV_EGL = 28
CONV_THREAD = 29
CONV_OTHER = 30
CONV_INCLUDE = 31
CONV_INCLUDE_CUDA_MAIN_H = 32
CONV_TYPE = 33
CONV_LITERAL = 34
CONV_NUMERIC_LITERAL = 35
CONV_LAST = 36
API_DRIVER = 37
API_RUNTIME = 38
API_BLAS = 39
API_SPARSE = 40
API_RAND = 41
API_LAST = 42
API_FFT = 43
API_RTC = 44
API_ROCTX = 45
HIP_UNSUPPORTED = 46
API_PYTORCH = 1337
API_CAFFE2 = 1338
API_C10 = 1339
|
from .version import __version__
|
#!/usr/bin/env python3
""" The Python Hipify script.
##
# Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
# 2017-2018 Advanced Micro Devices, Inc. and
# Facebook Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
import argparse
import fnmatch
import re
import shutil
import sys
import os
from . import constants
from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS
from typing import Dict, List, Iterator, Optional
from collections.abc import Mapping, Iterable
HipifyResult = Dict[str, Optional[str]]
HipifyFinalResult = Dict[str, HipifyResult]
HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n"
HIPIFY_FINAL_RESULT: HipifyFinalResult = {}
# Hardcode the PyTorch template map
"""This dictionary provides the mapping from PyTorch kernel template types
to their actual types."""
PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"}
class InputError(Exception):
# Exception raised for errors in the input.
def __init__(self, message):
super(InputError, self).__init__(message)
self.message = message
def __str__(self):
return "{}: {}".format("Input error", self.message)
def openf(filename, mode):
return open(filename, mode, errors='ignore')
# Color coding for printing
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# To the programmer, the output of hipify is most likely a set of intermediate files.
# This class allows users of hipify to request a cleanup by running hipify and the
# compilation inside a `with` statement that instantiates this context manager
# with keep_intermediates=False.
# The main use case is cpp_extensions, specifically the load method.
# It is usually a good idea to keep intermediates (in case of errors, or to avoid
# recompiling unchanged files), but in cases where you don't want to keep them
# (e.g. in CI), this can be used to remove the generated files.
class GeneratedFileCleaner:
"""Context Manager to clean up generated files"""
def __init__(self, keep_intermediates=False):
self.keep_intermediates = keep_intermediates
self.files_to_clean = set()
self.dirs_to_clean = []
def __enter__(self):
return self
def open(self, fn, *args, **kwargs):
if not os.path.exists(fn):
self.files_to_clean.add(os.path.abspath(fn))
return open(fn, *args, **kwargs)
def makedirs(self, dn, exist_ok=False):
parent, n = os.path.split(dn)
if not n:
parent, n = os.path.split(parent)
if parent and n and not os.path.exists(parent):
self.makedirs(parent, exist_ok=True)
if not os.path.isdir(dn) or not exist_ok:
os.mkdir(dn)
self.dirs_to_clean.append(os.path.abspath(dn))
def __exit__(self, type, value, traceback):
if not self.keep_intermediates:
for f in self.files_to_clean:
os.unlink(f)
for d in self.dirs_to_clean[::-1]:
os.rmdir(d)
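# Illustrative usage sketch for GeneratedFileCleaner (the file names below are
# hypothetical, not taken from this module):
#
#     with GeneratedFileCleaner(keep_intermediates=False) as clean_ctx:
#         clean_ctx.makedirs("out/hip", exist_ok=True)
#         with clean_ctx.open("out/hip/kernel.hip", "w") as f:
#             f.write("// generated\n")
#     # On exit, files and directories newly created through clean_ctx are removed
#     # because keep_intermediates=False.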
def match_extensions(filename: str, extensions: Iterable) -> bool:
"""Helper method to see if filename ends with certain extension"""
return any(filename.endswith(e) for e in extensions)
def _fnmatch(filepath, patterns):
return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns)
def matched_files_iter(
root_path: str,
includes: Iterable = (),
ignores: Iterable = (),
extensions: Iterable = (),
out_of_place_only: bool = False,
is_pytorch_extension: bool = False) -> Iterator[str]:
exact_matches = set(includes)
# This is a very rough heuristic; really, we want to avoid scanning
# any file which is not checked into source control, but this script
    # needs to work even if you're not in a Git or Hg checkout, so it's easier to
# just block the biggest time sinks that won't matter in the
# end.
for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True):
rel_dirpath = os.path.relpath(abs_dirpath, root_path)
if rel_dirpath == '.':
            # Prune directories that we never want to scan; removing entries
            # from `dirs` here stops os.walk from descending into them.
if ".git" in dirs:
dirs.remove(".git")
if "build" in dirs:
dirs.remove("build")
if "third_party" in dirs:
dirs.remove("third_party")
for filename in filenames:
filepath = os.path.join(abs_dirpath, filename)
rel_filepath = os.path.join(rel_dirpath, filename)
# We respect extensions, UNLESS you wrote the entire
# filename verbatim, in which case we always accept it
if (
_fnmatch(filepath, includes)
and (not _fnmatch(filepath, ignores))
and (match_extensions(filepath, extensions) or filepath in exact_matches)
):
if not is_pytorch_extension: # for pytorch extensions, consider all files
if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath):
continue
if out_of_place_only and not is_out_of_place(rel_filepath):
continue
yield filepath
def preprocess_file_and_save_result(
output_directory: str,
filepath: str,
all_files: Iterable,
header_include_dirs: Iterable,
stats: Dict[str, List],
hip_clang_launch: bool,
is_pytorch_extension: bool,
clean_ctx: GeneratedFileCleaner,
show_progress: bool) -> None:
result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats,
hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)
fin_path = os.path.abspath(os.path.join(output_directory, filepath))
# Show what happened
if show_progress and "ignored" not in result["status"]:
print(
fin_path, "->",
result["hipified_path"], result["status"], flush=True)
HIPIFY_FINAL_RESULT[fin_path] = result
def compute_stats(stats):
unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]}
# Print the number of unsupported calls
print("Total number of unsupported CUDA function calls: {0:d}".format(len(unsupported_calls)))
# Print the list of unsupported calls
print(", ".join(unsupported_calls))
# Print the number of kernel launches
print("\nTotal number of replaced kernel launches: {0:d}".format(len(stats["kernel_launches"])))
def add_dim3(kernel_string, cuda_kernel):
    '''Wraps the grid and block launch-config arguments in dim3() (they become the second and third arguments of the resulting hipLaunchKernelGGL call)'''
count = 0
closure = 0
kernel_string = kernel_string.replace("<<<", "").replace(">>>", "")
arg_locs: List[Dict[str, int]] = [{} for _ in range(2)]
arg_locs[count]['start'] = 0
for ind, c in enumerate(kernel_string):
if count > 1:
break
if c == "(":
closure += 1
elif c == ")":
closure -= 1
if (c == "," or ind == len(kernel_string) - 1) and closure == 0:
arg_locs[count]['end'] = ind + (c != ",")
count += 1
if count < 2:
arg_locs[count]['start'] = ind + 1
first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1]
second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']]
first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ")
second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ")
first_arg_dim3 = "dim3({})".format(first_arg_clean)
second_arg_dim3 = "dim3({})".format(second_arg_clean)
first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3)
second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3)
cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3)
return cuda_kernel
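# Illustrative example (hypothetical identifiers) of what add_dim3 produces:
#   kernel_string: "<<<blocks, threads, 0, stream>>>"
#   cuda_kernel:   "my_kernel<<<blocks, threads, 0, stream>>>("
#   returns:       "my_kernel<<<dim3(blocks), dim3(threads), 0, stream>>>("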
RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+')
def processKernelLaunches(string, stats):
""" Replace the CUDA style Kernel launches with the HIP style kernel launches."""
# Concat the namespace with the kernel names. (Find cleaner way of doing this later).
string = RE_KERNEL_LAUNCH.sub(lambda inp: "{0}{1}::".format(inp.group(1), inp.group(2)), string)
def grab_method_and_template(in_kernel):
# The positions for relevant kernel components.
pos = {
"kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]},
"kernel_name": {"start": -1, "end": -1},
"template": {"start": -1, "end": -1}
}
# Count for balancing template
count = {"<>": 0}
# Status for whether we are parsing a certain item.
START = 0
AT_TEMPLATE = 1
AFTER_TEMPLATE = 2
AT_KERNEL_NAME = 3
status = START
# Parse the string character by character
for i in range(pos["kernel_launch"]["start"] - 1, -1, -1):
char = string[i]
# Handle Templating Arguments
if status == START or status == AT_TEMPLATE:
if char == ">":
if status == START:
status = AT_TEMPLATE
pos["template"]["end"] = i
count["<>"] += 1
if char == "<":
count["<>"] -= 1
if count["<>"] == 0 and (status == AT_TEMPLATE):
pos["template"]["start"] = i
status = AFTER_TEMPLATE
# Handle Kernel Name
if status != AT_TEMPLATE:
if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}:
if status != AT_KERNEL_NAME:
status = AT_KERNEL_NAME
pos["kernel_name"]["end"] = i
# Case: Kernel name starts the string.
if i == 0:
pos["kernel_name"]["start"] = 0
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
else:
# Potential ending point if we're already traversing a kernel's name.
if status == AT_KERNEL_NAME:
pos["kernel_name"]["start"] = i
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
def find_kernel_bounds(string):
"""Finds the starting and ending points for all kernel launches in the string."""
kernel_end = 0
kernel_positions = []
        # Continue until we cannot find any more kernels.
while string.find("<<<", kernel_end) != -1:
# Get kernel starting position (starting from the previous ending point)
kernel_start = string.find("<<<", kernel_end)
# Get kernel ending position (adjust end point past the >>>)
            kernel_end = string.find(">>>", kernel_start)
            if kernel_end == -1:
                raise InputError("no kernel end found")
            kernel_end += 3
# Add to list of traversed kernels
kernel_positions.append({"start": kernel_start, "end": kernel_end,
"group": string[kernel_start: kernel_end]})
return kernel_positions
# Replace comments and string literals from the code so that find_kernel_bounds does not
# wrongly capture kernels in comments and string literals.
# This function replaces them with "x" to keep positions.
def mask_comments(string):
in_comment = ''
prev_c = ''
new_string = ''
for c in string:
if in_comment == '':
# Outside comments
if c == '/' and prev_c == '/':
in_comment = '//'
elif c == '*' and prev_c == '/':
in_comment = '/*'
elif c == '"' and prev_c != '\\' and prev_c != "'":
in_comment = '"'
elif in_comment == '//':
# In // xxx
if c == '\r' or c == '\n':
in_comment = ''
elif in_comment == '/*':
# In /* xxx */
if c == '/' and prev_c == '*':
in_comment = ''
elif in_comment == '"':
# In ""
if c == '"' and prev_c != '\\':
in_comment = ''
prev_c = c
if in_comment == '':
new_string += c
else:
new_string += 'x'
return new_string
# Grab positional ranges of all kernel launches
get_kernel_positions = list(find_kernel_bounds(mask_comments(string)))
output_string = string
# Replace each CUDA kernel with a HIP kernel.
for kernel in get_kernel_positions:
# Get kernel components
params = grab_method_and_template(kernel)
# Find parenthesis after kernel launch
parenthesis = string.find("(", kernel["end"])
# Extract cuda kernel
cuda_kernel = string[params[0]["start"]:parenthesis + 1]
kernel_string = string[kernel['start']:kernel['end']]
end_param_index = 0 if params[1]['end'] == -1 else 1
kernel_name_with_template = string[params[0]['start']:params[end_param_index]['end'] + 1]
cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel)
# Keep number of kernel launch params consistent (grid dims, group dims, stream, dynamic shared size)
num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")")))
hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace(
">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace(
">>>", ", ").replace(kernel_name_with_template, "(" + kernel_name_with_template + ")")
# Replace cuda kernel with hip kernel
output_string = output_string.replace(cuda_kernel, hip_kernel)
# Update the statistics
stats["kernel_launches"].append(hip_kernel)
return output_string
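# Illustrative end-to-end example (hypothetical identifiers) of the rewrite done by
# processKernelLaunches:
#   "my_kernel<<<blocks, threads, 0, stream>>>(arg1, arg2);"
# becomes
#   "hipLaunchKernelGGL((my_kernel), dim3(blocks), dim3(threads), 0, stream, arg1, arg2);"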
def find_closure_group(input_string, start, group):
"""Generalization for finding a balancing closure group
if group = ["(", ")"], then finds the first balanced parentheses.
if group = ["{", "}"], then finds the first balanced bracket.
Given an input string, a starting position in the input string, and the group type,
find_closure_group returns the positions of group[0] and group[1] as a tuple.
Example:
find_closure_group("(hi)", 0, ["(", ")"])
Returns:
0, 3
"""
inside_parenthesis = False
parens = 0
pos = start
p_start, p_end = -1, -1
while pos < len(input_string):
if input_string[pos] == group[0]:
if inside_parenthesis is False:
inside_parenthesis = True
parens = 1
p_start = pos
else:
parens += 1
elif input_string[pos] == group[1] and inside_parenthesis:
parens -= 1
if parens == 0:
p_end = pos
return p_start, p_end
pos += 1
return None, None
def find_bracket_group(input_string, start):
    """Finds the first balanced bracket group (i.e. "{" ... "}")."""
    return find_closure_group(input_string, start, group=["{", "}"])
def find_parentheses_group(input_string, start):
    """Finds the first balanced parentheses group (i.e. "(" ... ")")."""
    return find_closure_group(input_string, start, group=["(", ")"])
RE_ASSERT = re.compile(r"\bassert[ ]*\(")
def replace_math_functions(input_string):
"""FIXME: Temporarily replace std:: invocations of math functions
with non-std:: versions to prevent linker errors NOTE: This
can lead to correctness issues when running tests, since the
correct version of the math function (exp/expf) might not get
called. Plan is to remove this function once HIP supports
std:: math function calls inside device code
"""
output_string = input_string
for func in MATH_TRANSPILATIONS:
output_string = output_string.replace(r'{}('.format(func), '{}('.format(MATH_TRANSPILATIONS[func]))
return output_string
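# Illustrative example, assuming MATH_TRANSPILATIONS maps "std::exp" to "::exp":
#   "auto y = std::exp(x);"  ->  "auto y = ::exp(x);"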
RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()")
def hip_header_magic(input_string):
"""If the file makes kernel builtin calls and does not include the cuda_runtime.h header,
then automatically add an #include to match the "magic" includes provided by NVCC.
TODO:
Update logic to ignore cases where the cuda_runtime.h is included by another file.
"""
# Copy the input.
output_string = input_string
# Check if one of the following headers is already included.
headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"]
if any(re.search(r'#include ("{0}"|<{0}>)'.format(ext), output_string) for ext in headers):
return output_string
# Rough logic to detect if we're inside device code
hasDeviceLogic: int
hasDeviceLogic = "hipLaunchKernelGGL" in output_string
hasDeviceLogic += "__global__" in output_string
hasDeviceLogic += "__shared__" in output_string
hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None
# If device logic found, provide the necessary header.
if hasDeviceLogic:
output_string = '#include "hip/hip_runtime.h"\n' + input_string
return output_string
RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;")
def replace_extern_shared(input_string):
"""Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead.
https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__
    Example:
        "extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)"
        "extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, smem)"
"""
output_string = input_string
output_string = RE_EXTERN_SHARED.sub(
lambda inp: "HIP_DYNAMIC_SHARED({0} {1}, {2})".format(
inp.group(1) or "", inp.group(2), inp.group(3)), output_string)
return output_string
def get_hip_file_path(rel_filepath, is_pytorch_extension=False):
"""
Returns the new name of the hipified file
"""
# At the moment, some PyTorch source files are HIPified in place. The predicate
# is_out_of_place tells us if this is the case or not.
assert(not os.path.isabs(rel_filepath))
if not is_pytorch_extension and not is_out_of_place(rel_filepath):
return rel_filepath
dirpath, filename = os.path.split(rel_filepath)
root, ext = os.path.splitext(filename)
# Here's the plan:
#
# In general, we need to disambiguate the HIPified filename so that
# it gets a different name from the original filename, so
# that we don't overwrite the original file
#
# There's a lot of different naming conventions across PyTorch
# and Caffe2, but the general recipe is to convert occurrences
# of cuda/gpu to hip, and add hip if there are no occurrences
# of cuda/gpu anywhere.
#
# Concretely, we do the following:
#
# - If there is a directory component named "cuda", replace
# it with "hip", AND
#
# - If the file name contains "CUDA", replace it with "HIP", AND
#
# - ALWAYS replace '.cu' with '.hip', because those files
    #   contain CUDA kernels that need to be hipified and processed with
    #   the HIP compiler
#
# - If we are not hipifying a PyTorch extension, and the parent
# directory name did not change as a result of the above
# transformations, insert "hip" in the file path
# as the direct parent folder of the file
#
# - If we are hipifying a PyTorch extension, and the parent directory
# name as well as the filename (incl. extension) did not change as
# a result of the above transformations, insert "_hip" in the filename
#
# This isn't set in stone; we might adjust this to support other
# naming conventions.
if ext == '.cu':
ext = '.hip'
orig_filename = filename
orig_dirpath = dirpath
dirpath = dirpath.replace('cuda', 'hip')
dirpath = dirpath.replace('CUDA', 'HIP')
dirpath = dirpath.replace('THC', 'THH')
root = root.replace('cuda', 'hip')
root = root.replace('CUDA', 'HIP')
# Special case to handle caffe2/core/THCCachingAllocator
if dirpath != "caffe2/core":
root = root.replace('THC', 'THH')
if not is_pytorch_extension and dirpath == orig_dirpath:
dirpath = os.path.join(dirpath, 'hip')
if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename:
root = root + "_hip"
return os.path.join(dirpath, root + ext)
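# Illustrative examples (hypothetical paths, with is_pytorch_extension=False) of
# the renaming performed above:
#   "aten/src/ATen/native/cuda/Foo.cu" -> "aten/src/ATen/native/hip/Foo.hip"
#   "caffe2/operators/pool_op.cu"      -> "caffe2/operators/hip/pool_op.hip"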
def is_out_of_place(rel_filepath):
assert(not os.path.isabs(rel_filepath))
if rel_filepath.startswith("torch/"):
return False
if rel_filepath.startswith("tools/autograd/templates/"):
return False
return True
# Keep this synchronized with includes/ignores in build_amd.py
def is_pytorch_file(rel_filepath):
assert(not os.path.isabs(rel_filepath))
if rel_filepath.startswith("aten/"):
if rel_filepath.startswith("aten/src/ATen/core/"):
return False
return True
if rel_filepath.startswith("torch/"):
return True
if rel_filepath.startswith("tools/autograd/templates/"):
return True
return False
def is_cusparse_file(rel_filepath):
assert(not os.path.isabs(rel_filepath))
if is_pytorch_file(rel_filepath):
return "sparse" in rel_filepath.lower()
return False
def is_caffe2_gpu_file(rel_filepath):
assert(not os.path.isabs(rel_filepath))
if rel_filepath.startswith("c10/cuda"):
return True
filename = os.path.basename(rel_filepath)
_, ext = os.path.splitext(filename)
return ('gpu' in filename or ext in ['.cu', '.cuh']) and ('cudnn' not in filename)
# Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508
class Trie():
"""Regex::Trie in Python. Creates a Trie out of a list of words. The trie can be exported to a Regex pattern.
The corresponding Regex should match much faster than a simple Regex union."""
def __init__(self):
self.data = {}
def add(self, word):
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[''] = 1
def dump(self):
return self.data
def quote(self, char):
return re.escape(char)
def _pattern(self, pData):
data = pData
if "" in data and len(data.keys()) == 1:
return None
alt = []
cc = []
q = 0
for char in sorted(data.keys()):
if isinstance(data[char], dict):
try:
recurse = self._pattern(data[char])
alt.append(self.quote(char) + recurse)
except Exception:
cc.append(self.quote(char))
else:
q = 1
cconly = not len(alt) > 0
if len(cc) > 0:
if len(cc) == 1:
alt.append(cc[0])
else:
alt.append('[' + ''.join(cc) + ']')
if len(alt) == 1:
result = alt[0]
else:
result = "(?:" + "|".join(alt) + ")"
if q:
if cconly:
result += "?"
else:
result = "(?:%s)?" % result
return result
def pattern(self):
return self._pattern(self.dump())
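# Minimal sketch of how the Trie collapses a word list into a single pattern
# (the resulting pattern text is illustrative, not guaranteed verbatim):
#   t = Trie()
#   t.add("cudaMalloc")
#   t.add("cudaMemcpy")
#   t.pattern()  # -> something like "cudaM(?:alloc|emcpy)"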
CAFFE2_TRIE = Trie()
CAFFE2_MAP = {}
PYTORCH_TRIE = Trie()
PYTORCH_MAP: Dict[str, object] = {}
# In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip.
# The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance.
# Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex.
# In the case of SPARSE, we must use the hip types for complex instead of the roc types,
# but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority.
# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place.
# When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices.
PYTORCH_SPARSE_MAP = {}
for mapping in CUDA_TO_HIP_MAPPINGS:
assert isinstance(mapping, Mapping)
for src, value in mapping.items():
dst = value[0]
meta_data = value[1:]
if constants.API_CAFFE2 not in meta_data:
PYTORCH_TRIE.add(src)
# if src is already in PYTORCH_MAP and dst belongs to API_SPARSE
# do not overwrite PYTORCH_MAP, store dst separately
if constants.API_SPARSE in meta_data and PYTORCH_MAP.get(src, ""):
PYTORCH_SPARSE_MAP[src] = dst
else:
PYTORCH_MAP[src] = dst
if constants.API_PYTORCH not in meta_data:
CAFFE2_TRIE.add(src)
CAFFE2_MAP[src] = dst
RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.pattern())
RE_PYTORCH_PREPROCESSOR = re.compile(r'(?<=\W)({0})(?=\W)'.format(PYTORCH_TRIE.pattern()))
RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"')
RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>')
RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"')
RE_CU_SUFFIX = re.compile(r'\.cu\b') # be careful not to pick up .cuh
"""
Returns a dict with the following keys:
"hipified_path" : absolute path of hipified source file
"status" : "ok" if hipified file was written out
"skipped" if an identical hipified file already existed or hipified file couldn't be written out
"ignored" if the source file was a hipified file itself or not meant to be hipified
"""
def preprocessor(
output_directory: str,
filepath: str,
all_files: Iterable,
header_include_dirs: Iterable,
stats: Dict[str, List],
hip_clang_launch: bool,
is_pytorch_extension: bool,
clean_ctx: GeneratedFileCleaner,
show_progress: bool) -> HipifyResult:
""" Executes the CUDA -> HIP conversion on the specified file. """
if filepath not in all_files:
return {"hipified_path": None, "status": "[ignored, not to be hipified]"}
fin_path = os.path.abspath(os.path.join(output_directory, filepath))
rel_filepath = os.path.relpath(filepath, output_directory)
with open(fin_path, 'r', encoding='utf-8') as fin:
if fin.readline() == HIPIFY_C_BREADCRUMB:
return {"hipified_path": None, "status": "[ignored, input is hipified output]"}
fin.seek(0)
output_source = fin.read()
orig_output_source = output_source
# get_hip_file_path needs a relative path to work correctly
fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension)))
if not os.path.exists(os.path.dirname(fout_path)):
clean_ctx.makedirs(os.path.dirname(fout_path))
# unsupported_calls statistics reporting is broken atm
def pt_repl(m):
return PYTORCH_MAP[m.group(0)]
def pt_sparse_repl(m):
# checks SPARSE map first, and if a miss occurs, falls back to pytorch mappings
return PYTORCH_SPARSE_MAP.get(m.group(0), pt_repl(m))
if is_pytorch_extension:
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
else:
if is_cusparse_file(rel_filepath):
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_sparse_repl, output_source)
elif is_pytorch_file(rel_filepath):
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
else:
def c2_repl(m):
return CAFFE2_MAP[m.group(0)]
output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)
# Header rewrites
def mk_repl(templ, include_current_dir=True):
def repl(m):
f = m.group(1)
dirpath, filename = os.path.split(f)
if (
f.startswith("ATen/cuda")
or f.startswith("ATen/native/cuda")
or f.startswith("ATen/native/nested/cuda")
or f.startswith("ATen/native/quantized/cuda")
or f.startswith("ATen/native/sparse/cuda")
or f.startswith("ATen/native/transformers/cuda")
or f.startswith("THC/")
or f.startswith("THCUNN/")
or (f.startswith("THC") and not f.startswith("THCP"))
):
return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension))
# if filename is one of the files being hipified for this extension
if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)):
header_dir = None
header_filepath = None
# If include_current_dir True, look first in same dir as the including source file
if include_current_dir:
header_dir_to_check = os.path.dirname(fin_path)
header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
if os.path.exists(header_path_to_check):
header_dir = header_dir_to_check
header_filepath = header_path_to_check
# If not found, look in include dirs one by one and first match wins
if header_filepath is None:
for header_include_dir in header_include_dirs:
header_dir_to_check = os.path.join(output_directory, header_include_dir)
header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
if os.path.exists(header_path_to_check):
header_dir = header_dir_to_check
header_filepath = header_path_to_check
# If header file not found, keep as is
if header_filepath is None:
return m.group(0)
# Hipify header file first if needed
if header_filepath not in HIPIFY_FINAL_RESULT:
preprocess_file_and_save_result(output_directory,
header_filepath,
all_files, header_include_dirs, stats, hip_clang_launch,
is_pytorch_extension, clean_ctx, show_progress)
hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath]["hipified_path"]
return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None
else header_filepath, header_dir))
return m.group(0)
return repl
output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source)
output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source)
output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source)
# CMakeLists.txt rewrites
if filepath.endswith('CMakeLists.txt'):
output_source = output_source.replace('CUDA', 'HIP')
output_source = output_source.replace('THC', 'THH')
output_source = RE_CU_SUFFIX.sub('.hip', output_source)
# Perform Kernel Launch Replacements
if not hip_clang_launch:
output_source = processKernelLaunches(output_source, stats)
# Replace std:: with non-std:: versions
if (filepath.endswith(".cu") or filepath.endswith(".cuh")) and "PowKernel" not in filepath:
output_source = replace_math_functions(output_source)
# Include header if device code is contained.
output_source = hip_header_magic(output_source)
# Replace the extern __shared__
output_source = replace_extern_shared(output_source)
# Don't write out identical hipified files for extensions if dirpath has not changed
if (
is_pytorch_extension
and orig_output_source == output_source
and os.path.dirname(fin_path) == os.path.dirname(fout_path)
):
return {"hipified_path": fin_path, "status": "[skipped, no changes]"}
# Add hipify breadcrumb for C-style files to avoid re-hipification
if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")):
        output_source_ascii = output_source.encode("ascii", "ignore").decode()
output_source = HIPIFY_C_BREADCRUMB + output_source_ascii
do_write = True
if os.path.exists(fout_path):
with open(fout_path, 'r', encoding='utf-8') as fout_old:
do_write = fout_old.read() != output_source
if do_write:
try:
with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:
fout.write(output_source)
return {"hipified_path": fout_path, "status": "[ok]"}
except PermissionError as e:
print(f"{bcolors.WARNING}Failed to save {fout_path} with \"{e.strerror}\", leaving {fin_path} unchanged.{bcolors.ENDC}",
file=sys.stderr)
return {"hipified_path": fin_path, "status": "[skipped, no permissions]"}
else:
return {"hipified_path": fout_path, "status": "[skipped, already hipified]"}
def file_specific_replacement(filepath, search_string, replace_string, strict=False):
with openf(filepath, "r+") as f:
contents = f.read()
if strict:
contents = re.sub(r'\b({0})\b'.format(re.escape(search_string)), lambda x: replace_string, contents)
else:
contents = contents.replace(search_string, replace_string)
f.seek(0)
f.write(contents)
f.truncate()
def file_add_header(filepath, header):
with openf(filepath, "r+") as f:
contents = f.read()
if header[0] != "<" and header[-1] != ">":
header = '"{0}"'.format(header)
contents = ('#include {0} \n'.format(header)) + contents
f.seek(0)
f.write(contents)
f.truncate()
def fix_static_global_kernels(in_txt):
"""Static global kernels in HIP results in a compilation error."""
in_txt = in_txt.replace(" __global__ static", "__global__")
return in_txt
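# Illustrative example (hypothetical kernel name):
#   " __global__ static void my_kernel(...)"  ->  "__global__ void my_kernel(...)"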
RE_INCLUDE = re.compile(r"#include .*\n")
def extract_arguments(start, string):
""" Return the list of arguments in the upcoming function parameter closure.
Example:
string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
arguments (output):
'[{'start': 1, 'end': 7},
{'start': 8, 'end': 16},
{'start': 17, 'end': 19},
{'start': 20, 'end': 53}]'
"""
arguments = []
closures = {
"<": 0,
"(": 0
}
current_position = start
argument_start_pos = current_position + 1
# Search for final parenthesis
while current_position < len(string):
if string[current_position] == "(":
closures["("] += 1
elif string[current_position] == ")":
closures["("] -= 1
elif string[current_position] == "<":
closures["<"] += 1
elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
closures["<"] -= 1
# Finished all arguments
if closures["("] == 0 and closures["<"] == 0:
# Add final argument
arguments.append({"start": argument_start_pos, "end": current_position})
break
# Finished current argument
if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
arguments.append({"start": argument_start_pos, "end": current_position})
argument_start_pos = current_position + 1
current_position += 1
return arguments
def str2bool(v):
"""ArgumentParser doesn't support type=bool. Thus, this helper method will convert
from possible string types to True / False."""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def hipify(
project_directory: str,
show_detailed: bool = False,
extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"),
header_extensions: Iterable = (".cuh", ".h", ".hpp"),
output_directory: str = "",
header_include_dirs: Iterable = (),
includes: Iterable = ('*',),
extra_files: Iterable = (),
out_of_place_only: bool = False,
ignores: Iterable = (),
show_progress: bool = True,
hip_clang_launch: bool = False,
is_pytorch_extension: bool = False,
hipify_extra_files_only: bool = False,
clean_ctx: Optional[GeneratedFileCleaner] = None
) -> HipifyFinalResult:
if project_directory == "":
project_directory = os.getcwd()
# Verify the project directory exists.
if not os.path.exists(project_directory):
print("The project folder specified does not exist.")
sys.exit(1)
# If no output directory, provide a default one.
if not output_directory:
        project_directory = project_directory.rstrip("/")
output_directory = project_directory + "_amd"
if project_directory != output_directory:
includes = [include.replace(project_directory, output_directory) for include in includes]
ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores]
# Copy from project directory to output directory if not done already.
if not os.path.exists(output_directory):
shutil.copytree(project_directory, output_directory)
all_files = list(matched_files_iter(output_directory, includes=includes,
ignores=ignores, extensions=extensions,
out_of_place_only=out_of_place_only,
is_pytorch_extension=is_pytorch_extension))
all_files_set = set(all_files)
for f in extra_files:
if not os.path.isabs(f):
f = os.path.join(output_directory, f)
if f not in all_files_set:
all_files.append(f)
    # List all files in header_include_dirs to ensure they are hipified
from pathlib import Path
for header_include_dir in header_include_dirs:
if os.path.isabs(header_include_dir):
header_include_dir_path = Path(header_include_dir)
else:
header_include_dir_path = Path(os.path.join(output_directory, header_include_dir))
for path in header_include_dir_path.rglob('*'):
if (
path.is_file()
and _fnmatch(str(path), includes)
and (not _fnmatch(str(path), ignores))
and match_extensions(path.name, header_extensions)
):
all_files.append(str(path))
if clean_ctx is None:
clean_ctx = GeneratedFileCleaner(keep_intermediates=True)
# Preprocessing statistics.
stats: Dict[str, List] = {"unsupported_calls": [], "kernel_launches": []}
for filepath in (all_files if not hipify_extra_files_only else extra_files):
preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs,
stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)
print(bcolors.OKGREEN + "Successfully preprocessed all matching files." + bcolors.ENDC, file=sys.stderr)
# Show detailed summary
if show_detailed:
compute_stats(stats)
return HIPIFY_FINAL_RESULT
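# Minimal usage sketch; the path below is hypothetical and the import location is
# an assumption that depends on how this package is installed:
#   from hipify.hipify_python import hipify
#   results = hipify(project_directory="/path/to/my_project", show_progress=False)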
|
#!/usr/bin/env python3
#
# This file has been generated by conda-smithy in order to build the recipe
# locally.
#
import os
import glob
import subprocess
from argparse import ArgumentParser
import platform
def setup_environment(ns):
os.environ["CONFIG"] = ns.config
os.environ["UPLOAD_PACKAGES"] = "False"
if ns.debug:
os.environ["BUILD_WITH_CONDA_DEBUG"] = "1"
if ns.output_id:
os.environ["BUILD_OUTPUT_ID"] = ns.output_id
if "MINIFORGE_HOME" not in os.environ:
os.environ["MINIFORGE_HOME"] = os.path.join(
os.path.dirname(__file__), "miniforge3"
)
def run_docker_build(ns):
script = ".scripts/run_docker_build.sh"
subprocess.check_call([script])
def run_osx_build(ns):
script = ".scripts/run_osx_build.sh"
subprocess.check_call([script])
def verify_config(ns):
valid_configs = {
os.path.basename(f)[:-5] for f in glob.glob(".ci_support/*.yaml")
}
print(f"valid configs are {valid_configs}")
if ns.config in valid_configs:
print("Using " + ns.config + " configuration")
return
elif len(valid_configs) == 1:
ns.config = valid_configs.pop()
print("Found " + ns.config + " configuration")
elif ns.config is None:
print("config not selected, please choose from the following:\n")
selections = list(enumerate(sorted(valid_configs), 1))
for i, c in selections:
print(f"{i}. {c}")
s = input("\n> ")
idx = int(s) - 1
ns.config = selections[idx][1]
print(f"selected {ns.config}")
else:
raise ValueError("config " + ns.config + " is not valid")
    # Remove the following checks once the corresponding platforms are supported
if ns.config.startswith("win"):
raise ValueError(
f"only Linux/macOS configs currently supported, got {ns.config}"
)
elif ns.config.startswith("osx") and platform.system() == "Darwin":
if "OSX_SDK_DIR" not in os.environ:
            raise RuntimeError(
                "Need OSX_SDK_DIR env variable set. Run 'export OSX_SDK_DIR=/opt' "
                "to download the SDK automatically to '/opt/MacOSX<ver>.sdk'"
)
def main(args=None):
p = ArgumentParser("build-locally")
p.add_argument("config", default=None, nargs="?")
p.add_argument(
"--debug",
action="store_true",
help="Setup debug environment using `conda debug`",
)
p.add_argument(
"--output-id", help="If running debug, specify the output to setup."
)
ns = p.parse_args(args=args)
verify_config(ns)
setup_environment(ns)
if ns.config.startswith("linux") or (
ns.config.startswith("osx") and platform.system() == "Linux"
):
run_docker_build(ns)
elif ns.config.startswith("osx"):
run_osx_build(ns)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import setuptools
this_dir = os.path.dirname(os.path.abspath(__file__))
def fetch_requirements():
with open("requirements.txt") as f:
reqs = f.read().strip().split("\n")
return reqs
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(version_file_path) -> str:
with open(version_file_path) as version_file:
version_match = re.search(r"^__version_tuple__ = (.*)", version_file.read(), re.M)
if version_match:
ver_tup = eval(version_match.group(1))
ver_str = ".".join([str(x) for x in ver_tup])
return ver_str
raise RuntimeError("Unable to find version tuple.")
extensions = []
cmdclass = {}
setup_requires = []
if os.getenv("BUILD_CUDA_EXTENSIONS", "0") == "1":
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup_requires = ["ninja"]
extensions.extend(
[
CUDAExtension(
name="fairscale.fused_adam_cuda",
include_dirs=[os.path.join(this_dir, "fairscale/clib/fused_adam_cuda")],
sources=[
"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp",
"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu",
],
extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]},
)
]
)
cmdclass["build_ext"] = BuildExtension
if __name__ == "__main__":
setuptools.setup(
name="fairscale",
description="FairScale: A PyTorch library for large-scale and high-performance training.",
version=find_version("fairscale/version.py"),
setup_requires=setup_requires,
install_requires=fetch_requirements(),
include_package_data=True,
packages=setuptools.find_packages(include=["fairscale*"]), # Only include code within fairscale.
ext_modules=extensions,
cmdclass=cmdclass,
python_requires=">=3.8",
author="Foundational AI Research @ Meta AI",
author_email="[email protected]",
long_description=(
"FairScale is a PyTorch extension library for high performance and "
"large scale training on one or multiple machines/nodes. This library "
"extends basic PyTorch capabilities while adding new experimental ones."
),
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["wgit = fairscale.experimental.wgit.__main__:main"]},
classifiers=[
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: OS Independent",
],
)
|
import argparse
import re
from typing import Tuple
from setup import find_version
def get_next_version(release_type) -> Tuple[Tuple[int, int, int], str, str]:
current_ver = find_version("fairscale/version.py")
version_list = [int(x) for x in current_ver.strip("'").split(".")]
major, minor, patch = version_list[0], version_list[1], version_list[2]
if release_type == "patch":
patch += 1
elif release_type == "minor":
minor += 1
patch = 0
elif release_type == "major":
major += 1
minor = patch = 0
else:
raise ValueError("Incorrect release type specified. Acceptable types are major, minor and patch.")
new_version_tuple = (major, minor, patch)
new_version_str = ".".join([str(x) for x in new_version_tuple])
new_tag_str = "v" + new_version_str
return new_version_tuple, new_version_str, new_tag_str
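# Illustrative example (a sketch, assuming fairscale/version.py currently contains
# __version_tuple__ = (0, 4, 13)):
#   get_next_version("patch")  # -> ((0, 4, 14), "0.4.14", "v0.4.14")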
def update_version(new_version_tuple) -> None:
"""
    Write the given version tuple to fairscale/version.py,
    replacing the existing __version_tuple__ line.
"""
with open("fairscale/version.py", "r") as reader:
current_version_data = reader.read()
# for line in current_version_data:
version_match = re.search(r"^__version_tuple__ ", current_version_data)
if version_match:
new_version_data = "__version_tuple__ = %s\n" % str(new_version_tuple)
current_version_data = current_version_data.replace(version_match.string, new_version_data)
with open("fairscale/version.py", "w") as writer:
writer.write(current_version_data)
else:
raise RuntimeError("__version_tuple__ not found in version.py")
def main(args):
if args.release_type in ["major", "minor", "patch"]:
new_version_tuple, new_version, new_tag = get_next_version(args.release_type)
else:
raise ValueError("Incorrect release type specified")
if args.update_version:
update_version(new_version_tuple)
print(new_version, new_tag)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Versioning utils")
parser.add_argument("--release-type", type=str, required=True, help="type of release = major/minor/patch")
parser.add_argument(
"--update-version", action="store_true", required=False, help="updates the version in fairscale/version.py"
)
args = parser.parse_args()
main(args)
|
__version_tuple__ = (0, 4, 13)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
################################################################################
# Import most common subpackages
#
# NOTE: we don't maintain any public APIs in both experimental and fair_dev
# sub-modules. Code in them are experimental or for developer only. They
# can be changed, removed, anytime.
################################################################################
from typing import List
from . import nn
from .version import __version_tuple__
__version__ = ".".join([str(x) for x in __version_tuple__])
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
################################################################################
# Import most common subpackages
################################################################################
from typing import List
# Don't import sub-modules here; otherwise experimental code gets imported directly
# when a user does `import fairscale`. That can cause experimental code's import
# dependencies (like pygit2) to leak into fairscale's main dependencies.
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum, auto
from functools import lru_cache
from typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Sequence, Set, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch.utils.hooks import RemovableHandle
from fairscale.nn import FullyShardedDataParallel
class TraceForwardEvent(NamedTuple):
"""
Complementary trace event collected during the forward pass
to trace the memory increase and the memory taken by activations
"""
memory_diff: int
memory_activations: int
def to_dict(self) -> Dict[str, Any]:
return {
"memory_diff": self.memory_diff,
"memory_activations": self.memory_activations,
}
@classmethod
def from_dict(cls, serialized: Dict[str, Any]) -> "TraceForwardEvent":
return TraceForwardEvent(
memory_diff=serialized["memory_diff"],
memory_activations=serialized["memory_activations"],
)
class TraceBackwardEvent(NamedTuple):
"""
Complementary trace event collected during the forward pass
to trace the memory taken by activations
"""
memory_activations: int
def to_dict(self) -> Dict[str, Any]:
return {"memory_activations": self.memory_activations}
@classmethod
def from_dict(cls, serialized: Dict[str, Any]) -> "TraceBackwardEvent":
return TraceBackwardEvent(memory_activations=serialized["memory_activations"])
class LayerMemoryTrace(NamedTuple):
"""
Trace event providing the current memory usage at a point
    occurring during the forward or backward pass
module_name: name of the module under processing
module_params: size of the module parameters
allocated: state of the PyTorch allocated memory
reserved: state of the PyTorch reserved memory
is_forward: whether the trace was collected during forward
all_gathered: memory gathered since last event by FSDP
cumul_all_gathered: total amount of memory currently gathered by FSDP
event: additional information on the trace
"""
module_name: str
module_params: int
allocated: int
reserved: int
is_forward: bool
all_gathered: int
cumul_all_gathered: int
event: Union[TraceForwardEvent, TraceBackwardEvent]
def to_dict(self) -> Dict[str, Any]:
return {
"module_name": self.module_name,
"module_params": self.module_params,
"allocated": self.allocated,
"reserved": self.reserved,
"is_forward": self.is_forward,
"all_gathered": self.all_gathered,
"cumul_all_gathered": self.cumul_all_gathered,
"event": self.event.to_dict(),
}
@classmethod
def from_dict(cls, serialized: Dict[str, Any]) -> "LayerMemoryTrace":
if serialized["is_forward"]:
event: Union[TraceForwardEvent, TraceBackwardEvent] = TraceForwardEvent.from_dict(serialized["event"])
else:
event = TraceBackwardEvent.from_dict(serialized["event"])
return LayerMemoryTrace(
module_name=serialized["module_name"],
module_params=serialized["module_params"],
allocated=serialized["allocated"],
reserved=serialized["reserved"],
is_forward=serialized["is_forward"],
all_gathered=serialized["all_gathered"],
cumul_all_gathered=serialized["cumul_all_gathered"],
event=event,
)
@dataclass
class LayerwiseMemoryTrackerSummary:
"""
Summary of the memory allocation during forward/backward
- max_memory_allocated: the peak of memory allocated
- max_memory_cached: the peak of memory cached by PyTorch
- total_activation_allocations: cumulative count of activations allocations
- total_forward_allocations: cumulative count of forward pass allocations
- top_forward_activation_producers: layers that allocated the most activations
"""
max_memory_allocated: int
max_memory_cached: int
total_activation_allocations: int
total_forward_allocations: int
top_forward_activation_producers: List[LayerMemoryTrace]
class ProcessGroupTrackingEvent(Enum):
"""
Types of events that can be tracked in the process group:
- allgather: will track calls to ProcessGroup.allgather
"""
allgather = auto()
class ProcessGroupTracker:
"""
    To be used as a wrapper around a ProcessGroup to track
    calls to specific ProcessGroup functions such as
    "allgather".
The tracker will send a notification to the listener
when such calls occur.
Best used in conjunction with LayerwiseMemoryTracker:
```
# wrap the group used for FSDP
group = ProcessGroupTracker(group)
# use this group when creating FSDP blocks
model = FullyShardedDataParallel(model, process_group=group),
# monitor the model as before
tracker = LayerwiseMemoryTracker()
tracker.monitor(model)
# the detailed traces will now contain information
# about the amount of all gathered data
tracker.memory_traces
```
"""
def __init__(self, group: Any, listener: Optional[Callable] = None):
self.group = group
self.listener = listener
def __getattr__(self, item: str) -> Any:
        # Intercept the collective calls we want to trace; forward everything else
if item == "allgather":
# For PyTorch 1.8 and below
return self._build_wrapper(fct=self.group.allgather)
elif item == "_allgather_base":
# For PyTorch 1.9 and above
return self._build_wrapper(fct=getattr(self.group, item))
return getattr(self.group, item)
def _build_wrapper(self, fct: Callable) -> Callable:
def wrapper(
output_tensors: Union[torch.Tensor, Sequence[torch.Tensor]],
input_tensors: Union[torch.Tensor, Sequence[torch.Tensor]],
*args: list,
**kwargs: dict,
) -> Any:
if self.listener is not None:
self.listener(ProcessGroupTrackingEvent.allgather, output_tensors, input_tensors)
return fct(output_tensors, input_tensors, *args, **kwargs)
return wrapper
class LayerwiseMemoryTracker:
"""
Observe a module to get the graph of the memory consumption during
the forward and backward, layer by layer, with:
- a breakdown of the memory used (activations memory estimation)
- additional details such as amount of data exchanged with all gather
Requires the model to be on a CUDA device to track its memory
Example usage (no FSDP):
```
# create your model
model = models.resnet50().cuda()
# monitor the model
tracker = LayerwiseMemoryTracker()
tracker.monitor(model)
# Do a forward/backward
criterion(model(input), target).backward()
# show the plots
tracker.show_plots()
# get the detailed traces
tracker.memory_traces
# print a summary
print(tracker.summary)
```
Advanced usage (for FSDP):
```
# wrap the group used for FSDP
group = ProcessGroupTracker(group)
# use this group when creating FSDP blocks
model = FullyShardedDataParallel(model, process_group=group),
# monitor the model as before
tracker = LayerwiseMemoryTracker()
tracker.monitor(model)
# the detailed traces will now contain information
# about the amount of all gathered data
tracker.memory_traces
```
"""
def __init__(self) -> None:
self.memory_traces: List[LayerMemoryTrace] = []
self._hooks: List[RemovableHandle] = []
self._previous_module_name: Optional[str] = None
self._last_all_gather_memory = 0
self._cumul_all_gather_memory: List[int] = []
self._memory_pre_forward = 0
self._traced_module_names: Set[str] = set()
def monitor(self, model: nn.Module) -> None:
"""
Install hooks on the model to track its memory usage
"""
for name, m in model.named_modules():
h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))
h2 = m.register_forward_hook(self._create_post_forward_hook(name))
h3 = m.register_backward_hook(self._create_backward_hook(name))
self._hooks.extend([h1, h2, h3])
if isinstance(m, FullyShardedDataParallel):
if isinstance(m.process_group, ProcessGroupTracker):
m.process_group.listener = self._handle_process_group_call
torch.cuda.empty_cache()
def clear_traces(self) -> None:
"""
Clear all the traces: new traces will be written on a clean slate
"""
self.memory_traces.clear()
def stop(self) -> None:
"""
Stop any form of tracking (removes the hooks used to monitor the model)
"""
for h in self._hooks:
h.remove()
self._hooks.clear()
self._previous_module_name = None
self._memory_pre_forward = 0
self._last_all_gather_memory = 0
self._cumul_all_gather_memory.clear()
@property
def forward_traces(self) -> List[LayerMemoryTrace]:
"""
Get the part of the traces which corresponds to the forward pass
"""
return [t for t in self.memory_traces if t.is_forward]
@property
def backward_traces(self) -> List[LayerMemoryTrace]:
"""
Get the part of the traces which corresponds to the backward pass
"""
return [t for t in self.memory_traces if not t.is_forward]
@property
def max_memory_allocated(self) -> int:
"""
Peak memory allocated during the forward/backward pass
"""
return max(t.allocated for t in self.memory_traces)
@property
def max_memory_cached(self) -> int:
"""
Peak memory cached during the forward/backward pass
"""
return max(t.reserved for t in self.memory_traces)
@property
def summary(self) -> LayerwiseMemoryTrackerSummary:
"""
A quick summary of interesting statistics on the memory usage
during the forward/backward pass
"""
total_diff = sum(t.event.memory_diff for t in self.forward_traces) # type: ignore
total_act = sum(t.event.memory_activations for t in self.forward_traces)
top_act_producers = self.top_forward_activation_producers(top=10)
return LayerwiseMemoryTrackerSummary(
max_memory_allocated=self.max_memory_allocated,
max_memory_cached=self.max_memory_cached,
total_activation_allocations=total_act,
total_forward_allocations=total_diff,
top_forward_activation_producers=top_act_producers,
)
def top_forward_activation_producers(self, top: int = 10) -> List[LayerMemoryTrace]:
"""
What are the top activation producers during the forward pass
"""
return sorted(self.forward_traces, key=lambda a: a.event.memory_activations, reverse=True)[:top]
def show_plots(self, figsize: Tuple[int, int] = (16, 20), capture: bool = False) -> Optional[Any]:
"""
Show useful memory plots. Use "capture=True" to return an image
rather than displaying the plots.
"""
return compare_memory_traces_in_plot({"run": self.memory_traces}, figsize=figsize, capture=capture)
def save_traces(self, path: str) -> None:
"""
Save the traces in a JSON file
"""
import json
with open(path, "w") as f:
json_traces = [t.to_dict() for t in self.memory_traces]
json.dump({"traces": json_traces}, f)
@classmethod
def load(cls, path: str) -> "LayerwiseMemoryTracker":
import json
out = cls()
with open(path, "r") as f:
traces = json.load(f)["traces"]
out.memory_traces = [LayerMemoryTrace.from_dict(t) for t in traces]
return out
def _create_pre_forward_hook(self, name: str) -> Callable:
def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:
torch.cuda.synchronize()
allocated, reserved = self._capture_memory()
self._previous_module_name = name
self._memory_pre_forward = allocated
if isinstance(module, FullyShardedDataParallel):
self._cumul_all_gather_memory.append(0)
return _pre_forward_hook
def _handle_process_group_call(self, event: ProcessGroupTrackingEvent, *args: Sequence[Any]) -> None:
torch.cuda.synchronize()
if event == ProcessGroupTrackingEvent.allgather:
outputs, inputs = args
output_size = self._get_module_output_size(outputs)
self._last_all_gather_memory += output_size
if self._cumul_all_gather_memory:
self._cumul_all_gather_memory[-1] += output_size
def _create_post_forward_hook(self, name: str) -> Callable:
def _post_forward_hook(
module: nn.Module, inputs: Sequence[torch.Tensor], outputs: Sequence[torch.Tensor]
) -> None:
torch.cuda.synchronize()
if isinstance(module, FullyShardedDataParallel):
self._cumul_all_gather_memory.pop()
# Only if it is a leaf module
if name == self._previous_module_name:
allocated, reserved = self._capture_memory()
self._traced_module_names.add(name)
# Get the memory allocated for output activations
ys = self._filter_allocated_output(inputs, outputs)
activations = sum(self._get_module_output_size(y) for y in ys)
# Compute the memory diff + memory taken by the activations
self.memory_traces.append(
LayerMemoryTrace(
module_name=name,
module_params=self._get_parameter_size(module),
allocated=allocated,
reserved=reserved,
is_forward=True,
all_gathered=self._last_all_gather_memory,
cumul_all_gathered=sum(self._cumul_all_gather_memory),
event=TraceForwardEvent(
memory_diff=allocated - self._memory_pre_forward,
memory_activations=activations,
),
)
)
self._last_all_gather_memory = 0
# Clean previous forward call values
self._previous_module_name = None
self._memory_pre_forward = 0
return _post_forward_hook
def _create_backward_hook(self, name: str) -> Callable:
def _backward_hook(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor) -> None:
torch.cuda.synchronize()
if name not in self._traced_module_names:
return
ys = self._filter_allocated_output(grad_input, grad_output)
memory = sum(self._get_module_output_size(y) for y in ys)
allocated, reserved = self._capture_memory()
self.memory_traces.append(
LayerMemoryTrace(
module_name=name,
module_params=self._get_parameter_size(module),
allocated=allocated,
reserved=reserved,
is_forward=False,
all_gathered=self._last_all_gather_memory,
cumul_all_gathered=0,
event=TraceBackwardEvent(memory_activations=memory),
)
)
# Cleaning accumulated values since last call
self._last_all_gather_memory = 0
return _backward_hook
@staticmethod
def _capture_memory() -> Tuple[int, int]:
torch.cuda.synchronize()
        allocated = torch.cuda.memory_allocated()
        reserved = torch.cuda.memory_reserved()  # type: ignore
        return allocated, reserved
@classmethod
def _get_parameter_size(cls, module: nn.Module) -> int:
return sum(p.numel() * cls._get_dtype_size(p) for p in module.parameters())
@classmethod
def _get_module_output_size(cls, xs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> int:
"""
Return the minimum memory requirement to store the tensors
provided as parameters
"""
if isinstance(xs, torch.Tensor):
x = xs
p = cls._get_dtype_size(x)
for d in x.shape:
p *= d
return p
elif isinstance(xs, tuple) or isinstance(xs, list):
return sum(cls._get_module_output_size(x) for x in xs)
return 0
@classmethod
def _get_dtype_size(cls, x: torch.Tensor) -> int:
return 2 if x.dtype == torch.float16 else 4
@classmethod
def _filter_allocated_output(
cls, inputs: Union[torch.Tensor, Sequence[torch.Tensor]], outputs: Union[torch.Tensor, Sequence[torch.Tensor]]
) -> List[torch.Tensor]:
"""
        Only return the outputs that are newly allocated and are not views,
        reshapes or strides of the inputs
"""
xs = cls._collect_tensors(inputs)
ys = cls._collect_tensors(outputs)
return [y for y in ys if all(not cls._is_same_storage(x, y) for x in xs)]
@staticmethod
def _is_same_storage(x: torch.Tensor, y: torch.Tensor) -> bool:
"""
Indicate if x and y share the same storage, meaning that one of them
is a view, reshape or stride of the other or from a common tensor
"""
return x.storage().data_ptr() == y.storage().data_ptr()
@staticmethod
def _collect_tensors(module_io_tensors: Union[torch.Tensor, Sequence[torch.Tensor]]) -> List[torch.Tensor]:
"""
Extract the tensors out of the provided input or output of a nn.Module
"""
tensors = []
to_visit = [module_io_tensors]
while to_visit:
x = to_visit.pop()
if isinstance(x, torch.Tensor):
tensors.append(x)
elif isinstance(x, tuple) or isinstance(x, list):
                to_visit.extend(x)
return tensors
def find_best_reset_points(activation_sizes: List[int], num_checkpoints: int) -> Tuple[int, List[int]]:
"""
Assuming constant memory requirement from the model, its gradients
and the associated optimizer state (realistic for small models
or models that are sharded enough to be considered small), this
function computes the ideal placement for the checkpoints by
returning the limits at which we should reset memory.
"""
n = len(activation_sizes)
@lru_cache(maxsize=None)
def visit(pos: int, remaining: int) -> Tuple[int, List[int]]:
if pos == n:
return 0, []
if remaining == 0:
return sum(activation_sizes[pos:]), []
min_val = float("inf")
allocation = []
current_chunk = 0
for curr_pos in range(pos, n):
current_chunk += activation_sizes[curr_pos]
sub_result, sub_alloc = visit(curr_pos + 1, remaining - 1)
result = max(current_chunk, sub_result)
if result < min_val:
min_val = result
allocation = list(sub_alloc)
allocation.append(curr_pos + 1)
return int(min_val), allocation
best_score, best_allocation = visit(0, num_checkpoints)
return best_score, best_allocation[::-1]
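# Illustrative sketch (not part of the original library): find_best_reset_points splits a
# list of per-layer activation sizes into (num_checkpoints + 1) contiguous chunks so that
# the largest chunk is as small as possible. The sizes below are made-up demo values.
def _example_find_best_reset_points() -> None:
    activation_sizes = [10, 30, 20, 40]
    max_chunk, reset_points = find_best_reset_points(activation_sizes, num_checkpoints=2)
    # Resetting before indices 2 and 3 gives chunks [10, 30], [20] and [40],
    # so the largest chunk holds 40 units of activation memory.
    assert reset_points == [2, 3]
    assert max_chunk == 40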
@dataclass
class SuggestedCheckpoints:
max_memory: int
split_modules: List[str]
all_modules: List[str]
def suggest_checkpoint_location(
traces: List[LayerMemoryTrace], num_checkpoints: int, num_skipped_layers: int = 0
) -> SuggestedCheckpoints:
"""
Given a trace of a model, collected with or without checkpoint,
return the best places to insert a reset of activation memory.
The names of the returned modules are the boundaries of the
suggested checkpoint_wrapper wrappings
"""
# From the traces, extract how much activation memory
# is generated during the forward pass, layer by layer
visited = set()
modules, allocations = [], []
for t in traces:
if t.is_forward:
name = t.module_name
memory = t.event.memory_activations
if name not in visited:
visited.add(name)
modules.append(name)
allocations.append(memory)
# To skip some layers where we do not want activations
if num_skipped_layers:
modules = modules[num_skipped_layers:]
allocations = allocations[num_skipped_layers:]
# Compute the best positions to reset the memory
max_memory, reset_indices = find_best_reset_points(allocations, num_checkpoints=num_checkpoints)
# Then map it back to module names
return SuggestedCheckpoints(
max_memory=max_memory,
split_modules=[modules[i] for i in reset_indices],
all_modules=modules,
)
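# Illustrative sketch (not part of the original library): a typical workflow is to collect
# traces with the LayerwiseMemoryTracker above and then ask where activation checkpointing
# would help the most. It assumes a CUDA device and the optional torchvision dependency;
# the model, input shape and checkpoint count are arbitrary demo choices.
def _example_suggest_checkpoint_location() -> None:
    from torchvision import models  # hypothetical example dependency

    model = models.resnet50().cuda()
    tracker = LayerwiseMemoryTracker()
    tracker.monitor(model)
    model(torch.randn(2, 3, 224, 224, device="cuda")).sum().backward()
    suggestion = suggest_checkpoint_location(tracker.memory_traces, num_checkpoints=2)
    # split_modules are the suggested boundaries for checkpoint_wrapper wrappings.
    print(suggestion.max_memory, suggestion.split_modules)
    tracker.stop()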
def _assert_visualisation_library_installed() -> None:
try:
import PIL # NOQA
import matplotlib # NOQA
except ImportError:
install_matplotlib = "pip install matplotlib"
install_pil = "pip install Pillow"
error_message = "Visualizing memory plots requires matplotlib and Pillow installed"
assert False, f"{error_message}: {install_matplotlib}, {install_pil}"
def compare_memory_traces_in_plot(
memory_traces_by_job: Dict[str, List[LayerMemoryTrace]],
figsize: Tuple[int, int] = (16, 20),
capture: bool = False,
) -> Optional[Any]:
"""
Create a plot of the memory allocation over time during the forward/backward
passes, with a breakdown of the memory used for activation VS parameters
"""
_assert_visualisation_library_installed()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=figsize, ncols=2, nrows=3)
graph_creator = _MemoryGraphCreator()
ax[0, 0].set_title("memory allocated")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.allocated_memory_curve(ax[0, 0], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[0, 0].legend()
ax[0, 1].set_title("memory reserved")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.reserved_memory_curve(ax[0, 1], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[0, 1].legend()
ax[1, 0].set_title("activation allocations")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.activation_allocations(ax[1, 0], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[1, 0].legend()
ax[1, 1].set_title("cumulative forward activations")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.cumulative_activations(ax[1, 1], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[1, 1].legend()
ax[2, 0].set_title("all gathered memory")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.all_gathered_memory(ax[2, 0], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[2, 0].legend()
ax[2, 1].set_title("parameter memory")
for job_name, memory_traces in memory_traces_by_job.items():
graph_creator.module_parameters(ax[2, 1], job_name, memory_traces)
if len(memory_traces_by_job) > 1:
ax[2, 1].legend()
if not capture:
plt.show()
return None
else:
return matplotlib_figure_to_image(fig)
class _MemoryGraphCreator:
"""
Helper class to create graphs to display memory
"""
def __init__(self) -> None:
import matplotlib
self.font = {
"family": matplotlib.rcParams["font.family"],
"weight": "normal",
"size": 12,
}
def allocated_memory_curve(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
allocated_memory = [t.allocated for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, allocated_memory)
ax.plot(x, y_forward, x, y_backward, label=job_name)
max_index = np.argmax(allocated_memory)
max_trace = memory_traces[max_index]
max_module = ".".join([n for n in max_trace.module_name.split(".") if not n.startswith("_")])
max_phase = "fwd" if max_trace.is_forward else "bwd"
ax.set_ylim([None, max_trace.allocated * 1.1])
x_text, y_text = max(0, max_index * 0.8), max_trace.allocated * 1.04 # type: ignore
ax.text(x_text, y_text, f"{max_module} ({max_phase})", fontdict=self.font)
self._y_axis_in_gigabytes(ax)
def reserved_memory_curve(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
reserved_memory = [t.reserved for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, reserved_memory)
ax.plot(x, y_forward, x, y_backward, label=job_name)
self._y_axis_in_gigabytes(ax)
def activation_allocations(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
event_allocations = [t.event.memory_activations for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, event_allocations)
ax.plot(x, y_forward, x, y_backward, label=job_name)
self._y_axis_in_gigabytes(ax)
def cumulative_activations(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
event_allocations = [t.event.memory_activations for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, event_allocations)
cumulative_forward_activations = np.cumsum(y_forward)
ax.plot(x, cumulative_forward_activations, label=job_name)
self._y_axis_in_gigabytes(ax)
def all_gathered_memory(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
# Plot the all_gathered and cumulative all_gathered memory
gathered_memory = [t.all_gathered for t in memory_traces]
cumul_gathered_memory = [t.cumul_all_gathered for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, gathered_memory)
ax.plot(x, y_forward, x, y_backward, label=job_name)
ax.plot(x, cumul_gathered_memory, label=job_name)
self._y_axis_in_gigabytes(ax)
# Adding the name of the layer with max cumulative all_gathered memory
max_index = np.argmax(cumul_gathered_memory)
max_trace = memory_traces[max_index]
max_module = ".".join([n for n in max_trace.module_name.split(".") if not n.startswith("_")])
ax.set_ylim([None, max_trace.cumul_all_gathered * 1.1])
x_text, y_text = max(0, max_index * 0.8), max_trace.cumul_all_gathered * 1.04 # type: ignore
ax.text(x_text, y_text, f"{max_module} (fwd)", fontdict=self.font)
def module_parameters(self, ax: Any, job_name: str, memory_traces: List[LayerMemoryTrace]) -> None:
module_parameters = [t.module_params for t in memory_traces]
x, y_forward, y_backward = self._split_forward_backward(memory_traces, module_parameters)
ax.plot(x, y_forward, x, y_backward, label=job_name)
self._y_axis_in_gigabytes(ax)
@staticmethod
def _y_axis_in_gigabytes(ax: Any) -> None:
ax.ticklabel_format(axis="y", style="sci", scilimits=(9, 9))
@classmethod
def _split_forward_backward(
cls, memory_traces: List[LayerMemoryTrace], values: List[Any]
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
x_values = np.array(list(range(len(memory_traces))))
mask_forwards, mask_backwards = cls._mask_forward_backward(memory_traces)
return (
x_values,
np.ma.masked_where(mask_backwards, values), # type: ignore
np.ma.masked_where(mask_forwards, values), # type: ignore
)
@classmethod
def _mask_forward_backward(cls, memory_traces: List[LayerMemoryTrace]) -> Tuple[np.ndarray, np.ndarray]:
mask_forwards = np.array([t.is_forward for t in memory_traces])
return mask_forwards, ~mask_forwards
@contextmanager
def null_context() -> Iterator[None]:
yield
def matplotlib_figure_to_image(fig: Any) -> Any:
"""
Convert a matplotlib figure to an image in RGB format, for instance
to save it on disk
"""
import io
from PIL import Image
buf = io.BytesIO()
fig.savefig(buf)
buf.seek(0)
return Image.open(buf).convert("RGB")
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Set
import torch
import torch.fx
from torch.fx.node import Node
def _get_count(param_count: Dict, node_name: str) -> int:
"""Identify different mutations of a given node name."""
# TODO(anj): This is not very stable since it is possible that the name
# may not be in the same format. Is there another way to identify nodes
# in a graph?
if node_name in param_count:
return param_count[node_name]
elif node_name.split("_")[0] in param_count:
return param_count[node_name.split("_")[0]]
else:
raise RuntimeError(f"Unable to find match between param {param_count} and node {node_name}")
def _create_shard_to_param_count(param_count: Dict, node_name_to_shard_id: Dict) -> Dict:
"""Utility to create a map from shard id to param count using existing state."""
shard_to_param_count: Dict[int, int] = {}
for node_name in node_name_to_shard_id.keys():
try:
count = _get_count(param_count, node_name)
except RuntimeError:
continue
if node_name_to_shard_id[node_name] in shard_to_param_count:
shard_to_param_count[node_name_to_shard_id[node_name]] += count
else:
shard_to_param_count[node_name_to_shard_id[node_name]] = count
return shard_to_param_count
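# Illustrative sketch (not part of the original library): _create_shard_to_param_count
# simply aggregates per-node parameter counts by shard id; node names that cannot be
# matched to a parameter count are skipped. The names and counts below are made up.
def _example_create_shard_to_param_count() -> None:
    param_count = {"linear1": 100, "linear2": 200, "relu": 0}
    node_name_to_shard_id = {"linear1": 0, "relu": 0, "linear2": 1}
    shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)
    # Shard 0 holds linear1 + relu parameters, shard 1 holds linear2 parameters.
    assert shard_to_param_count == {0: 100, 1: 200}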
def _split_nodes(traced_graph_module: torch.fx.GraphModule, shard_count: int = 3) -> Dict:
"""Utility used to trace a graph and identify shard cutpoints."""
node_name_to_shard_id: Dict[str, int] = {}
shard_id = 0
nodes_so_far = []
param_count: Dict[str, int] = {}
shard_to_param_count = {}
# Find the total number of params in the model and
# the number of params per shard we are aiming for.
for name, module in traced_graph_module.named_modules():
name = name.replace(".", "_")
param_count[name] = sum([x.numel() for x in module.parameters()])
logging.info(f"Total number of params are {param_count['']}")
per_shard_param = param_count[""] // shard_count
logging.info(f"Per shard param count {per_shard_param}")
for node in traced_graph_module.graph.nodes:
if node.op == "placeholder":
node_name_to_shard_id[node.name] = shard_id
nodes_so_far.append(node.name)
elif node.op in ["get_attr", "call_function", "call_method", "call_module"]:
min_shard_id = shard_id
min_node_name = ""
# For each of the args of a given node, find the arg that is not the
# last node we traversed. This is to help us find skip connections
# across shards.
for arg in node.args:
# If the node has args that are inputs to the forward function, they
# may not have explicit names.
if not hasattr(arg, "name"):
continue
if arg.name in node_name_to_shard_id and arg.name != nodes_so_far[-1]:
if node_name_to_shard_id[arg.name] < min_shard_id:
min_shard_id = node_name_to_shard_id[arg.name]
min_node_name = arg.name
            # If there is an input that is not from the previous shard,
            # we collapse all the shards in between into a single shard
            # and update the param count per shard accordingly.
if min_shard_id < shard_id:
for node_name in reversed(nodes_so_far):
node_name_to_shard_id[node_name] = min_shard_id
if node_name == min_node_name:
break
shard_id = min_shard_id
# TODO(anj-s): Find a way to raise an error early if this can cause OOM errors.
shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)
# Update state that is tracking node -> shard id and shard id -> param count.
node_name_to_shard_id[node.name] = shard_id
nodes_so_far.append(node.name)
# TODO(anj): This could just be an update, we don't need to recreate the map.
shard_to_param_count = _create_shard_to_param_count(param_count, node_name_to_shard_id)
# If we have gone over the number of params per shard count that we want to
# achieve, we should add a new shard.
# The shard_id may not have been updated in the map if we are at a node that does not
# have params.
if shard_id in shard_to_param_count and shard_to_param_count[shard_id] > per_shard_param:
shard_id += 1
elif node.op == "output":
break
return node_name_to_shard_id
class _ExtendedLeafTracer(torch.fx.Tracer):
"""Tracer with an extended set of leaf nn.Modules."""
def __init__(self, leaf_modules: Set[torch.nn.Module]):
"""Initializes a new _ExtendedLeafTracer object.
Args:
            leaf_modules: The set of extra nn.Module instances which will not be traced
                through but instead considered to be leaves.
"""
super().__init__()
self.leaf_modules = leaf_modules
def is_leaf_module(self, m: torch.nn.Module, model_qualified_name: str) -> bool:
return super().is_leaf_module(m, model_qualified_name) or m in self.leaf_modules
# TODO(ehotaj): Extend this method to wrap at the least granular level. One way to do
# this would be to wrap the Module tree bottom up, first wrapping untraceable children and
# only wrapping parents if they are also untraceable.
def _trace(model: torch.nn.Module) -> torch.fx.GraphModule:
"""Traces the given model and automatically wraps untracable modules into leaves."""
leaf_modules = set()
tracer = _ExtendedLeafTracer(leaf_modules)
for name, module in model.named_modules():
# TODO(ehotaj): The default is_leaf_module includes everything in torch.nn.
# This means that some coarse modules like nn.TransformerEncoder are treated
# as leaves, not traced, and are unable to be sharded. We may want to extend our
# sharding code to trace through these modules as well.
if tracer.is_leaf_module(module, ""):
continue
try:
tracer.trace(module)
except (TypeError, torch.fx.proxy.TraceError):
leaf_modules.add(module)
tracer = _ExtendedLeafTracer(leaf_modules)
graph = tracer.trace(model)
return torch.fx.GraphModule(model, graph)
def shard_model(model: torch.nn.Module, shard_count: int = 3) -> List[torch.fx.GraphModule]:
"""Utility used to shard a model using torch.fx.
This function traces the model twice in an attempt to identify the
right cutpoints and then shard the model. In the first pass we calculate
the number of parameters as we are tracing the graph and mark nodes at
which we might want to create a new module. In the second pass we
modify the graph by inserting placeholders and output nodes to essentially
shard the graph.
    We don't support skip connections between shards. This means that all
    inputs and outputs are self-contained within a given shard. A node from
    shard 1 cannot be an input to a node from shard 3. We expect all inputs
    to a given shard to come from the last node in the previous shard. As a
    consequence, we may not be able to split the model into exactly the
    `shard_count` requested by the user.
Args:
model (nn.Module): Model to be sharded as specified by the device count.
shard_count (int): Number of shards that we want to split the model into.
"""
module_list: List[torch.fx.GraphModule] = []
num_graphs = 0
new_graph = torch.fx.Graph() # type: ignore
env: Dict[str, Node] = {}
new_input_node = None
traced_graph_module = _trace(model)
# This is the first pass where we attempt to get a map of where
# we need to insert placeholder and output nodes.
node_name_to_shard_id = _split_nodes(traced_graph_module, shard_count=shard_count)
# dummy value which indicates that this is the first node.
prev_shard_id = 1000
prev_node = None
for node in traced_graph_module.graph.nodes:
# If the current node is in the next shard, we insert an output node.
# A new graph is created and a placeholder is added for the next shard.
if node.name in node_name_to_shard_id and prev_shard_id < node_name_to_shard_id[node.name]:
assert prev_node, "prev_node cannot be None"
with new_graph.inserting_after(prev_node):
new_graph.output(env[prev_node.name])
num_graphs += 1
module_list.append(torch.fx.GraphModule(model, new_graph))
new_graph = torch.fx.Graph()
node_name = "placeholder" + str(num_graphs)
pl_node = new_graph.create_node("placeholder", node_name)
env[node_name] = pl_node
new_input_node = pl_node
if new_input_node is not None:
# Account for a placeholder in the new graph.
node.args = (new_input_node,)
new_input_node = None
if node.op in ["placeholder", "get_attr", "call_function", "call_method", "call_module"]:
# Copy the nodes from the existing graph to the new graph.
new_node = new_graph.node_copy(node, lambda x: env[x.name])
env[node.name] = new_node
elif node.op == "output":
# If this is the last node, we should add an output
# node and add the last graph to the list.
assert prev_node, "prev_node cannot be None"
with new_graph.inserting_after(prev_node):
new_graph.output(env[prev_node.name])
module_list.append(torch.fx.GraphModule(model, new_graph))
break
prev_node = new_node
prev_shard_id = node_name_to_shard_id[node.name]
return module_list
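# Illustrative sketch (not part of the original library): sharding a small sequential
# model and running the resulting GraphModules back to back. The layer sizes and shard
# count below are arbitrary; as noted in the docstring, the number of shards actually
# produced may differ from the requested shard_count.
def _example_shard_model() -> None:
    model = torch.nn.Sequential(
        torch.nn.Linear(10, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 10),
    )
    shards = shard_model(model, shard_count=2)
    x = torch.randn(4, 10)
    for shard in shards:
        x = shard(x)
    assert x.shape == (4, 10)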
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .mevo import BaselineSoftmaxNllLoss
from .mevo import MemoryEfficientVocabOutput as MEVO
from .offload import OffloadModel
from .sync_batchnorm import SyncBatchNorm
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import torch
from torch import Tensor
import torch.distributed as dist
from torch.distributed import ProcessGroup
from fairscale.internal import torch_version
from fairscale.nn.checkpoint import is_checkpointing, is_recomputing
def _forward(input: Tensor, affine: bool, mean: Tensor, invstd: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
if affine:
return (input - mean) * (invstd * weight.reshape_as(mean)) + bias.reshape_as(mean)
else:
return (input - mean) * invstd
def _track_running_stats(
running_mean: Tensor, running_var: Tensor, momentum: float, mean: Tensor, var: Tensor, total_count: Tensor
) -> None:
unbiased_var = var * (total_count / (total_count - 1))
running_mean += momentum * (mean.reshape(-1) - running_mean)
running_var += momentum * (unbiased_var.reshape(-1) - running_var)
def _calculate_stats(input: Tensor, eps: float, process_group: ProcessGroup) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
dim = [d for d in range(input.ndim) if d != 1]
count = torch.full((1,), input.numel() // input.size(1), device=input.device, dtype=input.dtype)
total_count = count.clone()
all_reduce_handle = dist.all_reduce(total_count, group=process_group, async_op=True)
mean = torch.mean(input, dim=dim, keepdim=True)
meansqr = torch.mean(input * input, dim=dim, keepdim=True)
vec = torch.cat([mean, meansqr])
all_reduce_handle.wait()
vec = vec * (count / total_count)
dist.all_reduce(vec, group=process_group)
mean, meansqr = vec.chunk(2)
var = meansqr - mean * mean
invstd = torch.rsqrt(var + eps)
return mean, var, invstd, total_count
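# Illustrative sketch (not part of the original library): the helpers above operate on
# plain tensors, with per-channel statistics kept in (1, C, 1, 1) shape for normalization
# and flattened to (C,) for the running buffers. The shapes and momentum below are
# arbitrary demo values and no process group is involved, unlike in _calculate_stats.
def _example_sync_bn_helpers() -> None:
    input = torch.randn(4, 3, 8, 8)  # (N, C, H, W)
    dim = [d for d in range(input.ndim) if d != 1]
    mean = torch.mean(input, dim=dim, keepdim=True)
    var = torch.mean(input * input, dim=dim, keepdim=True) - mean * mean
    invstd = torch.rsqrt(var + 1e-5)
    out = _forward(input, True, mean, invstd, torch.ones(3), torch.zeros(3))
    assert out.shape == input.shape
    running_mean, running_var = torch.zeros(3), torch.ones(3)
    total_count = torch.tensor(float(input.numel() // input.size(1)))
    _track_running_stats(running_mean, running_var, 0.1, mean, var, total_count)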
if torch_version()[:2] >= (1, 7):
_forward = torch.jit.script(_forward)
_track_running_stats = torch.jit.script(_track_running_stats)
class _SyncBatchNormFunction(torch.autograd.Function):
"""
An autograd function used to avoid storing activations for intermediate results.
NOTE: Even though the mean and var are passed into this function, we do the entire
backward, including mean and var, here. We have to calculate statistics outside
this function in order to avoid multiple all_reduces when using checkpointing.
"""
@staticmethod
# type: ignore
def forward(ctx, input, weight, bias, affine, mean, invstd, total_count, process_group):
ctx.save_for_backward(input, weight, bias, mean, invstd, total_count)
ctx.process_group = process_group
return _forward(input, affine, mean, invstd, weight, bias)
@staticmethod
# type: ignore
def backward(ctx, grad_output):
needs_input_grad = ctx.needs_input_grad[0]
needs_weight_grad = ctx.needs_input_grad[1]
grad_input = None
grad_weight = None
grad_bias = None
input, weight, bias, mean, invstd, total_count = ctx.saved_tensors
process_group = ctx.process_group
dim = [d for d in range(input.ndim) if d != 1]
if needs_input_grad or needs_weight_grad:
grad_common = torch.sum(
(input - mean) * grad_output, dim=dim, keepdim=True
) # common to grad_weight and grad_invstd
if needs_input_grad:
if weight is None: # i.e. affine is False
grad_input = invstd * grad_output
grad_mean = -torch.sum(grad_input, dim=dim, keepdim=True)
grad_invstd = grad_common
else:
grad_input = (invstd * weight.reshape_as(mean)) * grad_output
grad_mean = -torch.sum(grad_input, dim=dim, keepdim=True)
grad_invstd = grad_common * weight.reshape_as(mean)
grad_var = -0.5 * invstd.pow(3) * grad_invstd
grad_mean += -2 * mean * grad_var
grad_meansqr = grad_var
vec = torch.cat([grad_mean, grad_meansqr])
all_reduce_handle = dist.all_reduce(vec, group=process_group, async_op=True)
if needs_weight_grad:
grad_weight = (grad_common * invstd).resize_as(weight)
grad_bias = torch.sum(grad_output, dim=dim)
if needs_input_grad:
all_reduce_handle.wait()
vec = vec / total_count # NOTE(msb) removed '* count' here to avoid '/ count' below
grad_mean, grad_meansqr = vec.chunk(2)
grad_input += grad_mean # removed '/ count'
grad_input += input * (2 * grad_meansqr) # removed '/ count'
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None, None
class SyncBatchNorm(torch.nn.BatchNorm2d):
"""
Fast re-implementation of ``torch.nn.SyncBatchNorm`` that can achieve a speedup
of 5x or more over the default implementation depending on size of the input
and number of distributed workers.
"""
def __init__(
self, *args: Tuple[Any, ...], process_group: Optional[ProcessGroup] = None, **kwargs: Dict[str, Any]
) -> None:
super().__init__(*args, **kwargs) # type: ignore
self._process_group = process_group if process_group is not None else dist.group.WORLD
self.saved_for_2nd_fwd: List[Tuple] = []
self.disable_patch_batchnorm = True
def forward(self, input: Tensor) -> Tensor: # type: ignore
        # There are 3 modes in which this can be called:
# 1. not wrapped (and there is only a single phase)
# 2. wrapped and in checkpointing phase
# 3. wrapped and in recomputing phase
if not dist.is_initialized() or not self.training:
return super().forward(input)
wrapped = is_checkpointing() or is_recomputing()
if not wrapped or is_checkpointing():
# NOTE The full backward, including mean and var, is done by _SyncBatchNormFunction.
with torch.no_grad():
mean, var, invstd, total_count = _calculate_stats(input, self.eps, self._process_group)
if self.track_running_stats:
_track_running_stats(self.running_mean, self.running_var, self.momentum, mean, var, total_count)
if is_checkpointing():
self.saved_for_2nd_fwd.append((mean, invstd, total_count))
return _forward(input, self.affine, mean, invstd, self.weight, self.bias)
if is_recomputing():
mean, invstd, total_count = self.saved_for_2nd_fwd.pop(0)
return _SyncBatchNormFunction.apply(
input, self.weight, self.bias, self.affine, mean, invstd, total_count, self._process_group
)
@classmethod
def convert_sync_batchnorm(
cls, module: torch.nn.Module, process_group: Optional[ProcessGroup] = None
) -> torch.nn.Module:
r"""Helper function to convert all :attr:`BatchNorm*D` layers in the model to
:class:`fairscale.experimental.nn.SyncBatchNorm` layers.
Args:
module (nn.Module): module containing one or more attr:`BatchNorm*D` layers
process_group (optional): process group to scope synchronization,
default is the whole world
Returns:
The original :attr:`module` with the converted :class:`torch.nn.SyncBatchNorm`
layers. If the original :attr:`module` is a :attr:`BatchNorm*D` layer,
a new :class:`torch.nn.SyncBatchNorm` layer object will be returned
instead.
Example::
>>> # Network with nn.BatchNorm layer
>>> module = torch.nn.Sequential(
>>> torch.nn.Linear(20, 100),
>>> torch.nn.BatchNorm1d(100),
>>> ).cuda()
>>> # creating process group (optional)
>>> # ranks is a list of int identifying rank ids.
>>> ranks = list(range(8))
>>> r1, r2 = ranks[:4], ranks[4:]
>>> # Note: every rank calls into new_group for every
>>> # process group created, even if that rank is not
>>> # part of the group.
>>> process_groups = [torch.distributed.new_group(pids) for pids in [r1, r2]]
>>> process_group = process_groups[0 if dist.get_rank() <= 3 else 1]
>>> sync_bn_module = fairscale.experimental.nn.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
"""
module_output = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module_output = SyncBatchNorm(
module.num_features, # type: ignore
module.eps, # type: ignore
module.momentum, # type: ignore
module.affine, # type: ignore
module.track_running_stats, # type: ignore
process_group=process_group,
)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.qconfig = module.qconfig
for name, child in module.named_children():
module_output.add_module(name, cls.convert_sync_batchnorm(child, process_group))
del module
return module_output
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple
import torch
from torch import nn
import torch.distributed as dist
import torch.nn.functional as F
# Debugging flag to enable some prints. Useful to debug with FSDP.
DEBUG = False
def _next_power_of_2_or_max(n: int, max_n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n, with a limit.
Useful when used in splitting a tensor into chunks with power-of-2 sizes.
"""
# special case, just split to 1 element chunks.
if n == 0:
return 1
orig_n = n
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
assert n >= orig_n, f"{n} vs. {orig_n}"
assert bin(n).count("1") == 1, bin(n) # Catch the case n is too large for this function.
if n > max_n:
return max_n
return n
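# Illustrative sketch (not part of the original library): _next_power_of_2_or_max rounds
# a chunk size up to the next power of 2 but never past the given maximum.
def _example_next_power_of_2_or_max() -> None:
    assert _next_power_of_2_or_max(0, 1024) == 1  # special case: 1 element chunks
    assert _next_power_of_2_or_max(5, 1024) == 8  # rounded up to a power of 2
    assert _next_power_of_2_or_max(16, 1024) == 16  # already a power of 2
    assert _next_power_of_2_or_max(600, 512) == 512  # capped at the maximum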
def _reshape_inputs(input: torch.Tensor, target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Convert 3D inputs to 2D for this kernel"""
if len(input.shape) == 3:
input = input.reshape(-1, input.shape[2])
if len(target.shape) == 2:
target = target.reshape(-1)
return input, target
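# Illustrative sketch (not part of the original library): the kernels below work on 2D
# (tokens, d_model) inputs and 1D targets, so 3D (batch, seq, d_model) inputs and 2D
# targets are flattened first. The shapes below are arbitrary demo values.
def _example_reshape_inputs() -> None:
    input = torch.randn(2, 5, 8)  # (batch, seq, d_model)
    target = torch.zeros(2, 5, dtype=torch.long)
    input_2d, target_1d = _reshape_inputs(input, target)
    assert input_2d.shape == (10, 8)
    assert target_1d.shape == (10,)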
def get_data(
shape: Tuple[Tuple[int, int], Tuple[int, int]], dtype: torch.dtype = torch.float16, device: str = "cuda"
) -> Tuple[torch.Tensor, nn.Parameter, torch.Tensor]:
"""Utility function for getting some tensors for testing and benchmarking."""
(tokens, d1), (d2, vocabs) = shape
assert d1 == d2
input = torch.rand(tokens, d1, device=device, dtype=dtype).requires_grad_(True)
    # Before PyTorch 1.9, nn.Linear did not support the device and dtype init options,
    # so we use to() and an if condition instead.
layer = nn.Linear(d2, vocabs, bias=False).to(device)
assert dtype in [torch.float16, torch.float32]
if dtype == torch.float16:
layer = layer.half()
weight = layer.weight
target = (torch.rand(tokens, device=device) * vocabs).long()
return input, weight, target
class BaselineSoftmax(nn.Module):
"""Baseline softmax that does an output linear projection and a softmax.
We also support LMCL (Large Margin Cosine Loss) from the CosFace paper. See
more detailed comment in the MEVO class below.
This is intended to be used with an embedding layer with shared weights.
Args:
proj_weight (nn.Parameter):
The shared weight.
tile_factor (int):
Unused. It is here to make kernel init easier with MEVO.
log_softmax (bool):
If True, use log_softmax instead of softmax.
margin (float):
Used in LMCL (when scale != None). See MEVO comments for
more details.
scale (Optional[float]):
Used in LMCL. If scale is None, LMCL is turned off. See
MEVO comments for more details.
"""
def __init__(
self,
proj_weight: nn.Parameter,
tile_factor: int = 0,
log_softmax: bool = True,
margin: float = 0.35,
scale: Optional[float] = None,
):
super().__init__()
out_dim, in_dim = proj_weight.shape
assert "cuda" in str(proj_weight.device), "weight should be on GPU"
self.fc = nn.Linear(in_dim, out_dim, bias=False).to("cuda")
assert proj_weight.dtype in [torch.float16, torch.float32]
if proj_weight.dtype == torch.float16:
self.fc = self.fc.half()
self.fc.weight = proj_weight
assert self.fc.weight.dtype in [torch.float16, torch.float32], self.fc.weight.dtype
self.fp16 = self.fc.weight.dtype == torch.float16
self.log_softmax = log_softmax
self.margin = margin
self.scale = scale
def lmcl_pre_softmax(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
# normalize feature and fc layer before multiplication
# n: number of features (tokens)
# k: number of classes (vocab size)
# c: hidden dimension (d_model)
x = F.normalize(input, dim=1)
w = F.normalize(self.fc.weight, dim=1)
logits = torch.einsum("nc,kc->nk", x, w)
# add margin
row_ind = torch.arange(x.shape[0], dtype=torch.long).to(x.device)
col_ind = target
logits[row_ind, col_ind] -= self.margin
# add scale
logits *= self.scale
return logits
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: # type: ignore
"""Forward function that computes softmax output with the input and target."""
assert isinstance(input, torch.Tensor)
assert isinstance(target, torch.Tensor)
input, target = _reshape_inputs(input, target)
if self.fp16:
assert input.dtype == torch.float16
if self.scale is not None:
x = self.lmcl_pre_softmax(input, target)
else:
x = self.fc(input)
# Note that we do softmax in FP32, which is important for numerical stability.
if self.log_softmax:
x = F.log_softmax(x, dim=-1, dtype=torch.float32)
else:
x = F.softmax(x, dim=-1, dtype=torch.float32)
assert x.dtype == torch.float32
return x
class BaselineSoftmaxNllLoss(BaselineSoftmax):
"""Baseline that does an output projection, a softmax & a NLL loss (cross-entropy).
See BaselineSoftmax above. Constructor is the same. Only difference is in the
forward function.
This class is used for testing and benchmarking.
"""
def __init__(
self,
proj_weight: nn.Parameter,
tile_factor: int = 0,
log_softmax: bool = True,
margin: float = 0.35,
scale: Optional[float] = None,
):
super().__init__(proj_weight, tile_factor, log_softmax, margin, scale)
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: # type: ignore
"""Forward that directly compute the loss."""
assert isinstance(input, torch.Tensor)
assert isinstance(target, torch.Tensor)
input, target = _reshape_inputs(input, target)
x = super().forward(input, target)
return F.nll_loss(x, target, reduction="sum")
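# Illustrative sketch (not part of the original library): using the baseline fused
# projection + softmax + NLL loss for testing. This assumes a CUDA device, since both
# get_data and BaselineSoftmax expect the shared projection weight to live on the GPU;
# the (tokens, d_model) x (d_model, vocab) shapes below are arbitrary.
def _example_baseline_softmax_nll_loss() -> None:
    input, weight, target = get_data(((16, 32), (32, 100)), dtype=torch.float32)
    loss_fn = BaselineSoftmaxNllLoss(weight)
    loss = loss_fn(input, target)
    loss.backward()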
def lmcl_matmul(
i: torch.Tensor, w: torch.Tensor, tgt: torch.Tensor, w_idx: int, margin: float, scale: Optional[float]
) -> torch.Tensor:
"""LMCL variation of matmul with normalization, margin and scale."""
# normalize and matmul
logits = torch.matmul(F.normalize(i, dim=1), F.normalize(w, dim=1).T)
    # add margin using a mask since tgt might be out of the weight split's range.
mask = torch.arange(w_idx * w.shape[0], (w_idx + 1) * w.shape[0], dtype=torch.long, device=i.device).expand(
i.shape[0], -1
)
logits[mask == tgt.reshape(-1, 1)] -= margin
# add scale
logits *= scale
return logits
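# Illustrative sketch (not part of the original library): the LMCL variant of the matmul
# normalizes both operands, subtracts the margin from the logit of the target class when
# that class falls inside this weight split, and rescales. The margin and scale below are
# arbitrary demo values rather than tuned hyperparameters.
def _example_lmcl_matmul() -> None:
    tokens, d_model, vocab_split = 4, 8, 10
    i = torch.randn(tokens, d_model)
    w = torch.randn(vocab_split, d_model)
    tgt = torch.randint(0, vocab_split, (tokens,))
    logits = lmcl_matmul(i, w, tgt, w_idx=0, margin=0.35, scale=64.0)
    assert logits.shape == (tokens, vocab_split)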
class GetMaxFunction(torch.autograd.Function):
"""Custom checkpointed function to get max-per-token from an input and a weight"""
@staticmethod
def get_max(
i: torch.Tensor,
w: torch.Tensor,
tgt: torch.Tensor,
w_idx: int,
full_precision: bool,
margin: float,
scale: Optional[float],
) -> torch.Tensor:
"""
Throughout this code:
i: input data with shape = (split-of-tokens, d_model)
w: weight data with shape = (split-of-vocabs, d_model)
tgt: target prediction data with shape = (split-of-tokens,)
"""
if scale is not None:
_m = lmcl_matmul(i, w, tgt, w_idx, margin, scale)
else:
_m = torch.matmul(i, w.T)
if full_precision:
_m = _m.float()
_m = _m.max(dim=1)[0]
return _m
@staticmethod
def forward( # type: ignore
ctx: Any,
i: torch.Tensor,
w: torch.Tensor,
tgt: torch.Tensor,
kernel_obj: "MemoryEfficientVocabOutput",
w_idx: int,
w_split_size: int,
split_dim: int,
) -> torch.Tensor:
"""Forward function that computes the max, without saving activations."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG max fwd")
ctx.save_for_backward(i, w, tgt)
ctx.kernel_obj = kernel_obj
ctx.w_idx = w_idx
ctx.w_split_size = w_split_size
ctx.args = {}
assert split_dim == 0
# During forward, we use ``no_grad'' to avoid saving the activations.
# The activations will be recomputed in backward below and freed
# immediately after use. This saves the overall GPU peak memory of this layer.
with torch.no_grad():
return GetMaxFunction.get_max(i, w, tgt, w_idx, kernel_obj.fp_max, kernel_obj.margin, kernel_obj.scale)
@staticmethod
def backward(ctx: Any, *args: Any) -> Any:
"""Recompute the forward max and backward grad.
Accumulate the grad to the right split of the full grad.
"""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG max bwd")
assert len(args) == 1
# Gradients should already exist due to TargetScoreFunction's backward.
assert ctx.kernel_obj.proj_weight.grad is not None
# Get saved i and w.
i, w, tgt = ctx.saved_tensors
assert i.requires_grad
assert w.requires_grad
# We use ``detach()'' to ensure the backward call below does not
# trigger backward computation that produced i and w here. Otherwise,
# the backward call below would trigger backward all the way to
# the batch input.
i = i.detach().requires_grad_(True)
w = w.detach().requires_grad_(True)
# Forward + backward again.
with torch.enable_grad():
# This saves the activations.
maxs = GetMaxFunction.get_max(
i, w, tgt, ctx.w_idx, ctx.kernel_obj.fp_max, ctx.kernel_obj.margin, ctx.kernel_obj.scale
)
# This will use the activations and free them immediately.
torch.autograd.backward(maxs, *args)
# Accumulate the computed gradients into the bigger weight tensor's gradient tensor.
assert w.grad is not None
with torch.no_grad():
grads = torch.split(ctx.kernel_obj.proj_weight.grad, ctx.w_split_size)
grads[ctx.w_idx].add_(w.grad)
return i.grad, None, None, None, None, None, None
class GetSumFunction(torch.autograd.Function):
"""Custom checkpointed function to get sum-per-token from an input and a weight."""
@staticmethod
def get_sum(
i: torch.Tensor,
w: torch.Tensor,
tgt: torch.Tensor,
maxs: torch.Tensor,
w_idx: int,
full_precision: bool,
margin: float,
scale: Optional[float],
) -> torch.Tensor:
if scale is not None:
_s = lmcl_matmul(i, w, tgt, w_idx, margin, scale)
else:
_s = torch.matmul(i, w.T)
if full_precision:
_s = _s.float()
_s = (_s - maxs.reshape(-1, 1)).exp().sum(dim=1)
return _s
@staticmethod
def forward( # type: ignore
ctx: Any,
i: torch.Tensor,
w: torch.Tensor,
tgt: torch.Tensor,
maxs: torch.Tensor,
kernel_obj: "MemoryEfficientVocabOutput",
w_idx: int,
w_split_size: int,
split_dim: int,
) -> torch.Tensor:
"""Forward function that computes the sum, without saving activations."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG sum fwd")
ctx.save_for_backward(i, w, tgt, maxs)
ctx.kernel_obj = kernel_obj
ctx.w_idx = w_idx
ctx.w_split_size = w_split_size
assert split_dim == 0
with torch.no_grad():
return GetSumFunction.get_sum(
i, w, tgt, maxs, w_idx, kernel_obj.fp_sum, kernel_obj.margin, kernel_obj.scale
)
@staticmethod
def backward(ctx: Any, *args: Any) -> Any:
"""Recompute the forward sum and backward grad.
Accumulate the grad to the right split of the full grad.
"""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG sum bwd")
assert len(args) == 1
# Gradients should already exist due to TargetScoreFunction's backward.
assert ctx.kernel_obj.proj_weight.grad is not None
# Get saved i, w, and maxs.
i, w, tgt, maxs = ctx.saved_tensors
assert i.requires_grad
assert w.requires_grad
assert maxs.requires_grad
i = i.detach().requires_grad_(True)
w = w.detach().requires_grad_(True)
maxs = maxs.detach().requires_grad_(True)
# Forward + backward again.
with torch.enable_grad():
sums = GetSumFunction.get_sum(
i, w, tgt, maxs, ctx.w_idx, ctx.kernel_obj.fp_sum, ctx.kernel_obj.margin, ctx.kernel_obj.scale
)
torch.autograd.backward(sums, *args)
# Accumulate the grads.
assert w.grad is not None
with torch.no_grad():
grads = torch.split(ctx.kernel_obj.proj_weight.grad, ctx.w_split_size)
grads[ctx.w_idx].add_(w.grad)
return i.grad, None, None, maxs.grad, None, None, None, None
class TargetScoreFunction(torch.autograd.Function):
"""Custom checkpointed function to compute the target score."""
@staticmethod
def get_target_score(
i: torch.Tensor,
w: torch.Tensor,
target: torch.Tensor,
full_precision: bool,
margin: float,
scale: Optional[float],
) -> torch.Tensor:
tokens, d_model = i.shape
assert d_model == w.shape[1]
tw = w.gather(dim=0, index=target.reshape(target.shape[0], 1).expand(target.shape[0], d_model))
assert tw.shape == (tokens, d_model)
if scale is not None:
target_score = F.normalize(i, dim=1) * F.normalize(tw, dim=1)
else:
target_score = i * tw
if full_precision:
target_score = target_score.float()
target_score = target_score.sum(dim=1) # sum into target scores with shape (tokens,)
if scale is not None:
target_score -= margin
target_score *= scale
return target_score
@staticmethod
def forward( # type: ignore
ctx: Any, i: torch.Tensor, w: torch.Tensor, target: torch.Tensor, kernel_obj: "MemoryEfficientVocabOutput"
) -> torch.Tensor:
"""Forward, without activations."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG target fwd")
ctx.save_for_backward(i, w, target)
ctx.kernel_obj = kernel_obj
with torch.no_grad():
x = TargetScoreFunction.get_target_score(
i, w, target, kernel_obj.fp_target, kernel_obj.margin, kernel_obj.scale
)
return x
@staticmethod
def backward(ctx: Any, *args: Any) -> Any:
"""Forward and backward again, assign or accumulate the gradients."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG target bwd")
assert len(args) == 1
i, w, target = ctx.saved_tensors
assert i.requires_grad
assert w.requires_grad
assert not target.requires_grad
i = i.detach().requires_grad_(True)
w = w.detach().requires_grad_(True)
with torch.enable_grad():
scores = TargetScoreFunction.get_target_score(
i, w, target, ctx.kernel_obj.fp_target, ctx.kernel_obj.margin, ctx.kernel_obj.scale
)
torch.autograd.backward(scores, *args)
if ctx.kernel_obj.proj_weight.grad is not None:
# This means we accumulate full grad between iters. Not memory efficient.
ctx.kernel_obj.proj_weight.grad.add_(w.grad)
else:
ctx.kernel_obj.proj_weight.grad = w.grad
return i.grad, None, None, None
class BackwardTriggerFn(torch.autograd.Function):
"""A backward trigger function."""
@staticmethod
def forward(ctx: Any, w: torch.Tensor, trigger_tensor: torch.Tensor) -> torch.Tensor: # type: ignore
"""We take a weight tensor and the trigger as inputs and output the weight directly."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG trigger fwd")
ctx.save_for_backward(w, trigger_tensor)
return w
@staticmethod
def backward(ctx: Any, *args: Any) -> Any:
"""We return zero grad for the trigger only."""
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print("DEBUG trigger bwd")
assert len(args) == 1
w, trigger = ctx.saved_tensors
assert w.requires_grad
assert trigger.requires_grad
return None, torch.zeros_like(trigger)
class BackwardTrigger(nn.Module):
"""A backward trigger module.
    This module takes a parameter as an input and creates a linked parameter
    from a newly created trigger parameter.
The way to use it in a module's ``__init__'' and ``forward'' functions:
```
def __init__():
...
self.trigger = BackwardTrigger(some_layer.weight)
...
def forward():
w = self.trigger()
... continue to use w ...
```
    As a result, the trigger's backward hook will be called at the end of
    the backward pass for the module that uses this trigger.
"""
def __init__(self, linked_param: torch.Tensor):
super().__init__()
assert isinstance(linked_param, nn.Parameter)
self.trigger = nn.Parameter(torch.rand(1, dtype=linked_param.dtype, device=linked_param.device))
self.trigger._linked_param = linked_param
def forward(self) -> torch.Tensor: # type: ignore
return BackwardTriggerFn.apply(self.trigger._linked_param, self.trigger)
class MemoryEfficientVocabOutput(nn.Module): # AKA. MEVO
"""Fused fc + softmax + nll_loss in a tiled fashion.
MEVO uses much less memory but is quite a bit slower.
MEVO also implements the LMCL (Large Margin Cosine Loss) function introduced by
highly cited
`CosFace: Large Margin Cosine Loss for Deep Face Recognition [Wang et al.]`_.
.. _`CosFace: Large Margin Cosine Loss for Deep Face Recognition [Wang et al.]`: https://arxiv.org/abs/1801.09414
LMCL can be turned on using the ``margin`` and ``scale`` parameters below. These
hyperparameters most likely require tuning, depending on the number of classes etc.
    MEVO LMCL can be suitable for face recognition and image retrieval tasks, especially
    when the number of prediction target classes is large. MEVO is slower but can use much
    less GPU memory in that case, which enables training with larger batches. We
    hope this is helpful, but we strongly recommend that users (AI researchers
    and engineers) carefully consider their applications of this technology. This
    type of technology should not be used by a small group of people exclusively to
    potentially harm the general public.
Args:
proj_weight (nn.Parameter):
Sharing this weight with an embedding layer.
tile_factor (int):
Number of splits to use on the input sequence and vocab dimensions.
Default: 16
reduction (str):
Reduction OP (sum or mean).
Default: sum
margin (float):
Hyperparameter of the separation margin between classes. See the
appendix of the CosFace paper for a formula on how to compute its
value properly. The default value is unlikely to be suitable in all
cases.
Default: 0.35
scale (Optional[float]):
Hyperparameter of the feature-vector-scaling for LMCL. When not
supplied, LMCL is turned off. See the appendix of the CosFace paper for
a formula on how to compute its value properly.
Default: None
"""
def __init__(
self,
proj_weight: nn.Parameter,
tile_factor: int = 16,
reduction: str = "sum",
margin: float = 0.35,
scale: Optional[float] = None,
):
super().__init__()
self.proj_weight = proj_weight
        # TODO (Min): these two factors don't have to be the same. More tuning can be done.
self.tf_in, self.tf_w = tile_factor, tile_factor
self.fp_max = True
self.fp_sum = True # This is esp. important when tensors are large. Otherwise, you get inf.
self.fp_target = True
self.log_softmax = True
self.reduction = reduction
assert self.reduction in ["sum", "mean"]
self.margin = margin
self.scale = scale
self.trigger = BackwardTrigger(self.proj_weight)
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
print(
f"DEBUG cfg tf_in={self.tf_in} tf_w={self.tf_w} fp_max={self.fp_max} "
f"fp_sum={self.fp_sum} fp_target={self.fp_target} log_softmax={self.log_softmax} "
f"reduction={self.reduction} margin={self.margin} scale={self.scale}"
)
def get_target_nlprob(
self, i: torch.Tensor, w: torch.Tensor, target: torch.Tensor, debase_max: torch.Tensor, exp_sums: torch.Tensor
) -> torch.Tensor:
"""Get target's negative log probability."""
target_score = TargetScoreFunction.apply(i, w, target, self)
prob = (target_score - debase_max).exp() / exp_sums
if self.log_softmax:
# lprob
prob = prob.log()
# nlprob, then sum over all tokens.
return -prob.sum()
def eval_forward(self, input: torch.Tensor) -> torch.Tensor:
"""Eval time forward that doesn't fuse the softmax and NLL Loss kernels."""
        # Margin, scaling and normalization of LMCL do not apply at eval time as far as
        # I can tell. Therefore, we just do a matmul like the standard output layer.
return torch.matmul(input, self.proj_weight.T)
def forward(self, input: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor: # type: ignore
if not self.training and target is None:
return self.eval_forward(input)
if DEBUG and dist.is_initialized() and dist.get_rank() == 0:
cur_mem = round(torch.cuda.memory_allocated() / 1024 / 1024)
mem = round(torch.cuda.max_memory_allocated() / 1024 / 1024)
print("DEBUG cur, peak", cur_mem, mem)
assert isinstance(input, torch.Tensor)
assert isinstance(target, torch.Tensor)
if torch.is_grad_enabled():
assert input.requires_grad
input, target = _reshape_inputs(input, target)
tokens, d_model = input.shape
(t2,) = target.shape
vocab, d2 = self.proj_weight.shape
assert d_model == d2, f"incorrect shape {d_model} vs {d2}"
assert tokens == t2, f"incorrect shape {tokens} vs {t2}"
split_dim = 0
input_split_size = _next_power_of_2_or_max(tokens // self.tf_in, tokens)
weight_split_size = _next_power_of_2_or_max(vocab // self.tf_w, vocab)
inputs = torch.split(input, input_split_size, split_dim)
weight = self.trigger()
weights = torch.split(weight, weight_split_size, split_dim)
targets = tuple([torch.Tensor()] * len(inputs))
if self.scale is not None:
targets = torch.split(target, input_split_size, split_dim)
# Get maxs
maxs = []
for i, tgt in zip(inputs, targets):
m = None # max with (tokens_tile,) shape
for w_idx, w in enumerate(weights):
_m = GetMaxFunction.apply(i, w, tgt, self, w_idx, weight_split_size, split_dim)
if m is None:
m = _m
else:
m = torch.max(m, _m)
assert m is not None
maxs.append(m) # (tokens_tile,)
maxs_tensor = torch.cat(maxs) # (tokens,)
assert maxs_tensor.shape == (tokens,)
# Get sums.
sums = []
for i, tgt, debase_max in zip(inputs, targets, maxs):
s = None # sum with (tokens_tile,) shape
for w_idx, w in enumerate(weights):
_s = GetSumFunction.apply(i, w, tgt, debase_max, self, w_idx, weight_split_size, split_dim)
if s is None:
s = _s
else:
s += _s
assert s is not None
sums.append(s) # (tokens_tile,)
sums_tensor = torch.cat(sums) # (tokens,)
assert sums_tensor.shape == (tokens,)
# select weights for targets
result = self.get_target_nlprob(input, self.proj_weight, target, maxs_tensor, sums_tensor)
if self.reduction == "mean":
result /= tokens
return result
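# Illustrative sketch (not part of the original library): wiring MEVO to a projection
# weight that would normally be shared with an embedding layer. MEVO targets GPU training
# with very large vocabularies; the tiny CPU-sized shapes and tile factor below are only
# meant to show the call pattern.
def _example_mevo() -> None:
    tokens, d_model, vocab = 16, 8, 32
    proj_weight = nn.Parameter(torch.randn(vocab, d_model))
    mevo = MemoryEfficientVocabOutput(proj_weight, tile_factor=4, reduction="mean")
    input = torch.randn(tokens, d_model, requires_grad=True)
    target = torch.randint(0, vocab, (tokens,))
    loss = mevo(input, target)
    loss.backward()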
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from builtins import isinstance
import functools
import logging
from typing import Any, List, Tuple
import torch
from torch import nn
def _conditional_amp_fwd_decorator(orig_func): # type: ignore
if hasattr(torch.cuda.amp, "custom_fwd"):
return torch.cuda.amp.custom_fwd(orig_func) # type: ignore
@functools.wraps(orig_func)
def inner_decorator(*args: Any, **kwargs: Any) -> Any:
return orig_func(*args, **kwargs)
return inner_decorator
def _conditional_amp_bwd_decorator(orig_func): # type: ignore
if hasattr(torch.cuda.amp, "custom_bwd"):
return torch.cuda.amp.custom_bwd(orig_func) # type: ignore
@functools.wraps(orig_func)
def inner_decorator(*args: Any, **kwargs: Any) -> Any:
return orig_func(*args, **kwargs)
return inner_decorator
def _split(modules: nn.Sequential, number_splits: int) -> List[List[nn.Module]]:
number_splits = min(len(modules), number_splits)
splits: List[List[nn.Module]] = [[] for _ in range(number_splits)]
# Count the number of parameters per exposed layer, use that as a proxy for memory footprint
total_number_params = sum([sum(p.numel() for p in m.parameters()) for m in modules])
number_parameters_per_shard = total_number_params // number_splits
current_shard = 0
logging.info(
f"This model has {total_number_params/1e6:.2f}M parameters, aiming for {number_parameters_per_shard/1e6:.2f}M parameters per shard"
)
for m in modules:
for p in m.parameters():
p.data = p.data.pin_memory()
# Number of parameters in the current shard
current_shard_params = sum(p.numel() for sm in splits[current_shard] for p in sm.parameters())
# This shard is big enough, point to the next one
if (
current_shard_params > 0
and current_shard_params + sum(p.numel() for p in m.parameters()) > number_parameters_per_shard
and current_shard < number_splits - 1
):
current_shard += 1
splits[current_shard].append(m)
for i, split in enumerate(splits):
current_shard_params = sum(p.numel() for sm in split for p in sm.parameters())
logging.info(f"Shard {i} holds {current_shard_params/1e6:.2f}M parameters")
return splits
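# Illustrative sketch (not part of the original library): _split groups the layers of an
# nn.Sequential into roughly parameter-balanced shards and pins their memory for fast
# host-to-device copies, so it assumes a CUDA-capable host. The layer sizes and split
# count below are arbitrary demo values.
def _example_split() -> None:
    modules = nn.Sequential(
        nn.Linear(32, 32),
        nn.ReLU(),
        nn.Linear(32, 32),
        nn.ReLU(),
        nn.Linear(32, 32),
    )
    splits = _split(modules, number_splits=3)
    assert sum(len(shard) for shard in splits) == len(modules)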
class ModelShard(nn.Module):
"""
Wrap one shard of the model, make it possible to load parameters on the
fly for the FW and BW pass on the given device.
"""
def __init__(
self,
cpu_model_shard: nn.Module,
device: torch.device,
offload_device: torch.device,
index: int,
):
super().__init__()
self.model_shard = cpu_model_shard
self.index = index
# Save all the parameter sizes to be able to restore them
self.device = device
torch.cuda.device(self.device)
self.offload_device = offload_device
self.model_shard.to(offload_device)
self._cpu_to_gpu_stream = torch.cuda.Stream(device=self.device)
self._gpu_to_cpu_stream = torch.cuda.Stream(device=self.device)
def forward(self, *inputs): # type: ignore
return self.model_shard(*inputs) if isinstance(inputs, tuple) else self.model_shard(inputs)
def to(self, device: torch.device) -> "ModelShard": # type: ignore
# Make sure that the lookahead and lookback shards are not captured by this call
self.model_shard.to(device)
return self
def train(self, mode: bool = True) -> "ModelShard":
# Make sure that the lookahead and lookback shards are not captured by this call
self.model_shard.train(mode)
return self
def to_device(self) -> None:
self.model_shard.to(device=self.device, non_blocking=True)
def forward_load(self, non_blocking: bool = True) -> None:
with torch.cuda.stream(self._cpu_to_gpu_stream):
# Restore all the parameter buffers
self.model_shard.to(device=self.device, non_blocking=non_blocking)
# Ignore the following function for code coverage since the backward pass
# is triggered by C++ code and cannot be calculated when overriding
# autograd.Function
def backward_load(self, non_blocking: bool = True) -> None: # pragma: no cover
with torch.cuda.stream(self._cpu_to_gpu_stream):
self.model_shard.to(self.device, non_blocking=non_blocking)
def forward_drop(self, non_blocking: bool = True) -> None:
with torch.cuda.stream(self._gpu_to_cpu_stream):
self.model_shard.to(self.offload_device, non_blocking=non_blocking)
# Ignore the following function for code coverage since the backward pass
# is triggered by C++ code and cannot be calculated when overriding
# autograd.Function
def backward_drop(self, non_blocking: bool = True) -> None: # pragma: no cover
with torch.cuda.stream(self._gpu_to_cpu_stream):
self.model_shard.to(self.offload_device, non_blocking=non_blocking)
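# Lifecycle sketch (illustrative only; a real caller must synchronize the copy
# streams with the compute stream before touching the parameters):
#
#   shard = ModelShard(nn.Linear(8, 8), device=torch.device("cuda"),
#                      offload_device=torch.device("cpu"), index=0)
#   shard.forward_load()                 # H2D copy queued on _cpu_to_gpu_stream
#   out = shard(torch.randn(4, 8, device="cuda"))
#   shard.forward_drop()                 # D2H copy queued on _gpu_to_cpu_stream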
class OffloadFunction(torch.autograd.Function):
"""
This Function enables checkpointing of intermediate activations at
shard boundaries by overriding the forward and backward pass of the nn.Module.
- In the FW pass, it drops parameters in the previous shard and
loads parameters for the next shard. No graph is constructed in the FW pass.
This enables us to offload intermediate activations present at the shard
boundaries.
- In the BW pass, it does the reverse. We run the forward pass using the
saved intermediate activations and calculate gradients as needed.
The trade-off is latency vs memory when using activation checkpointing.
- Follows heavily from https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html#checkpoint.
NOTE: see https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function
"""
@staticmethod
@_conditional_amp_fwd_decorator # type: ignore
def forward(ctx: Any, inputs: Any, dummy_input: Any, model_instance: Any) -> Any:
inputs = inputs if isinstance(inputs, tuple) else (inputs,)
ctx.inputs = inputs
ctx.model_instance = model_instance
# TODO(anj-s): We might need to store this for each boundary activation.
        # Currently we assume all boundary activation inputs require grad.
ctx.grad_requirements = tuple(x.requires_grad for x in inputs)
ctx.fwd_rng_state = torch.get_rng_state()
# List of input activations starting with the given input.
model_instance._activations = [inputs]
# Enumerate through layer shards and apply activations from the previous shard.
for index, layer_shard in enumerate(model_instance.model_slices):
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:forward_load"):
# Bring in the current activations onto the device.
model_instance._activations[index] = tuple([a.cuda() for a in list(model_instance._activations[index])])
# Bring in the current layer shard onto the device.
layer_shard.forward_load()
# Apply the FP and store the activations on the CPU.
inputs = model_instance._activations[index]
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:no_grad_forward_pass"):
with torch.no_grad():
output_list: List[Any] = []
for given_input in inputs:
given_input_list = torch.chunk(given_input, model_instance._num_microbatches)
given_output_list = []
for inputs in given_input_list:
output = layer_shard(inputs)
given_output_list.append(output)
given_output = torch.cat(given_output_list).squeeze(-1)
output_list.append(given_output)
output = tuple(output_list)
output = output if isinstance(output, tuple) else (output,)
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:forward_drop"):
                # Move the activations used for the current shard back to the CPU.
model_instance._activations[index] = tuple([a.cpu() for a in list(model_instance._activations[index])])
# The newly computed activations remain on the GPU ready for the next shard computation.
model_instance._activations.append(output)
# Move the layer shard back to the CPU.
layer_shard.forward_drop()
# The last instance will lose the gradient function if we move it to the CPU.
        # This is because all grad functions are present on the device that ran the FW pass.
# The last activation remains on the GPU and is the return value of this function.
# Note that this assumes that the target is also on the GPU which is required for calculating
# the loss.
result = model_instance._activations[-1]
result = [r.cuda() for r in result]
for r in result:
r.requires_grad = True
return result[0] if len(result) == 1 else result
# Ignore the following function for code coverage since the backward pass
# is triggered by C++ code and cannot be calculated when overriding
# autograd.Function
@staticmethod
@_conditional_amp_bwd_decorator
def backward(ctx, *grad_outputs): # type: ignore # pragma: no cover
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError("Checkpointing is not compatible with .grad(), please use .backward() if possible")
inputs = ctx.inputs
model_instance = ctx.model_instance
for i, need_grad in enumerate(ctx.grad_requirements):
inputs[i].requires_grad = need_grad
all_grads = [grad_outputs]
for model_shard, activation in zip(
reversed(model_instance.model_slices), reversed(model_instance._activations[:-1])
):
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:backward_load"):
# Move the activation to the GPU.
activation = tuple([a.cuda() for a in list(activation)])
# Move the model shard to the GPU.
model_shard.backward_load()
# Store the BW pass state.
bwd_rng_state = torch.get_rng_state()
# TODO(anj-s): Why detach inputs?
activation = torch.utils.checkpoint.detach_variable(activation)
# Get the last gradient calculation.
final_grads = all_grads[-1]
if isinstance(activation, torch.Tensor):
activation = (activation,)
if isinstance(final_grads, torch.Tensor):
final_grads = (final_grads,)
# Iterate through all the inputs/outputs of a shard (there could be multiple).
chunked_grad_list: List[Any] = []
# Chunk the activation and grad based on the number of microbatches that are set.
for chunked_activation, chunked_grad in zip(
torch.chunk(*activation, model_instance._num_microbatches), # type: ignore
torch.chunk(*final_grads, model_instance._num_microbatches), # type: ignore
):
# Set the states to what it used to be before the forward pass.
torch.set_rng_state(ctx.fwd_rng_state)
if isinstance(chunked_activation, torch.Tensor):
chunked_activation = (chunked_activation,) # type: ignore
if isinstance(chunked_grad, torch.Tensor):
chunked_grad = (chunked_grad,) # type: ignore
# Since we need a grad value of a non leaf element we need to set these properties.
for a in chunked_activation:
if a.dtype == torch.long:
continue
a.requires_grad = True
a.retain_grad()
with torch.autograd.profiler.record_function(
"fairscale.experimental.nn.offload:forward_pass_with_enable_grad"
):
with torch.enable_grad():
# calculate the output of the last shard wrt to the stored activation at the slice boundary.
outputs = model_shard(*chunked_activation)
# Set the states back to what it was at the start of this function.
torch.set_rng_state(bwd_rng_state)
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:backward_pass"):
torch.autograd.backward(outputs, chunked_grad)
intermediate_grads = []
for a in chunked_activation:
if a.grad is not None:
intermediate_grads.append(a.grad)
if None not in intermediate_grads:
chunked_grad_list += intermediate_grads
if chunked_grad_list:
# Append the list of grads to the all_grads list and this should be on the GPU.
all_grads.append(torch.cat(chunked_grad_list).squeeze(-1)) # type: ignore
with torch.autograd.profiler.record_function("fairscale.experimental.nn.offload:backward_drop"):
# Move the shard back to the CPU. This should move all the grad tensors to CPU as well.
# We don't need to move activations since we are using a copy of the tensors on the GPU.
model_shard.backward_drop()
detached_inputs = model_instance._activations[0]
grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else inp for inp in detached_inputs)
return (None, None) + grads
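# Conceptual analogue (sketch, not used by this file): the forward above runs
# under torch.no_grad() and the backward replays each shard under
# torch.enable_grad(), the same recompute idea as torch.utils.checkpoint:
#
#   from torch.utils.checkpoint import checkpoint
#   block = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8))
#   x = torch.randn(4, 8, requires_grad=True)
#   y = checkpoint(block, x)   # no intermediate activations are stored
#   y.sum().backward()         # block is re-run here to rebuild the graph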
class ShardSyncLayer(torch.autograd.Function):
"""
The shard sync layer is a synchronization point between model shards.
- In the forward pass, it drops parameters in the previous shard and
loads parameters for the next shard.
- In the backward pass, it does the reverse.
    It does not change or create any outputs at all; instead, it just
    forwards the input as the output.
NOTE: see https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function
"""
@staticmethod
@_conditional_amp_fwd_decorator # type: ignore
def forward(ctx: Any, inputs: Any, index: int, model_slices: Any, model_instance: Any) -> Any:
drop_index = index
load_index = index + 1
max_slices = len(model_slices)
if drop_index >= 0:
# Move shard from device to offload device.
model_slices[drop_index].forward_drop()
if load_index < max_slices:
# Load shard from offload device to device.
model_slices[load_index].forward_load()
ctx.index = index
ctx.model_slices = model_slices
ctx.model_instance = model_instance
return inputs if isinstance(inputs, tuple) else (inputs,)
# Ignore the following function for code coverage since the backward pass
# is triggered by C++ code and cannot be calculated when overriding
# autograd.Function
@staticmethod
@_conditional_amp_bwd_decorator
def backward(ctx, *grad_outputs): # type: ignore # pragma: no cover
load_index = ctx.index
drop_index = load_index + 1
model_slices = ctx.model_slices
model_instance = ctx.model_instance
# TODO(anj-s): Are these redundant in the backward pass?
if drop_index == len(model_slices):
# Drop the last activation since it is still on the CPU
# after the loss.backward() call.
model_instance._activations[-1] = tuple([a.cuda() for a in list(model_instance._activations[-1])])
if drop_index < len(model_slices):
# Move shard from device to offload device.
model_slices[drop_index].backward_drop()
model_instance._activations[drop_index] = tuple(
[a.cpu() for a in list(model_instance._activations[drop_index])]
)
if load_index >= 0:
# Load shard from offload device to device.
model_slices[load_index].backward_load()
model_instance._activations[load_index] = tuple(
[a.cuda() for a in list(model_instance._activations[load_index])]
)
# The returned variables need to mirror the forward inputs
# TODO(anj-s): Why do we need to do this?
if isinstance(grad_outputs, tuple):
return grad_outputs[0], None, None, None
return grad_outputs, None, None, None
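# Forward schedule sketch (illustrative, three slices): OffloadModel.forward
# calls ShardSyncLayer with index = -1, 0, 1, 2, which results in:
#
#   index -1: load slice 0
#   index  0: drop slice 0, load slice 1
#   index  1: drop slice 1, load slice 2
#   index  2: drop slice 2 (nothing left to load)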
class OffloadModel(nn.Module):
"""Wraps an arbitrary :class:`nn.Sequential <torch.nn.Sequential>` module
    to train by offloading the majority of the model parameters to the CPU.
`OffloadModel` is heavily inspired by the _L2L algorithm and _Zero-Offload.
::
model = get_model()
offload_model = OffloadModel(model, device,
        offload_device=torch.device("cpu"),
num_slices=3,
checkpoint_activation=True,
num_microbatches=5)
.. _L2L: https://arxiv.org/abs/2002.05645
.. _Zero-Offload: https://arxiv.org/abs/2101.06840
    At each step, a layer (or series of layers) is loaded
onto the GPU for the forward and backward pass with intermediate
activations being copied onto the GPU as required. Once the forward
or backward pass is completed for a given shard, it is moved back to
the CPU again.
`OffloadModel` supports activation checkpointing which reduces
the memory footprint. You can also increase the number of
microbatches which translates to more computation cycles for
every shard load. This helps offset the cost of moving the shard
from the CPU to GPU and vice versa.
Note: OffloadModel currently only supports nn.Sequential models.
Args:
module (~torch.nn.Sequential): Module to be offloaded.
device (torch.device):
Device where the active model should reside.
offload_device (torch.device):
Device where the inactive model should reside.
num_slices (int):
Number of slices into which the model should be chunked.
checkpoint_activation (bool):
Boolean to indicate if we want to checkpoint intermediate
activation states on the CPU. Default value is False.
num_microbatches (int):
Number of microbatches which should be run per model
shard on device.
"""
def __init__(
self,
model: Any,
device: torch.device,
offload_device: torch.device = torch.device("cpu"),
num_slices: int = 3,
checkpoint_activation: bool = False,
num_microbatches: int = 1,
):
super().__init__()
if not model:
raise TypeError("`model` argument to `OffloadModel` cannot be None.")
if not device:
raise TypeError("`device` argument to `OffloadModel` cannot be None.")
if not (isinstance(model, nn.Sequential) or type(model) == list):
raise TypeError("`model` argument to `OffloadModel` must be of type `nn.Sequential`.")
if not torch.cuda.is_available():
raise TypeError("CUDA must be available as one of the compute devices for `OffloadModel`.")
self.device = device
self.offload_device = offload_device
# List of model shards that will be placed on/off the device.
self.model_slices: List[nn.Module] = []
# TODO(anj): Add an experimental flag for using this instead of modifying the
# arg type.
if type(model) == list:
            # This is already sharded using the auto shard functionality.
for i, m in enumerate(model):
self.model_slices.append(
ModelShard(
cpu_model_shard=m,
device=device,
offload_device=offload_device,
index=i,
)
)
else:
# Slice the model into roughly equivalent sequential shards.
splits = _split(model, num_slices) # type: ignore
for i, split in enumerate(splits):
# Add one model handling this slice
self.model_slices.append(
ModelShard(
cpu_model_shard=nn.Sequential(*split),
device=device,
offload_device=offload_device,
index=i,
)
)
# Expose a unified view of the slices
self._model = torch.nn.Sequential(*self.model_slices)
# intermediate activations at the slice boundaries.
self._activations: List[Tuple] = []
# Currently we only support microbatches with activation checkpointing.
if not checkpoint_activation and num_microbatches > 1:
raise RuntimeError("We currently only support microbatches with activation checkpointing.")
# Bool indicating if we want to checkpoint activation on the host.
self._checkpoint_activation = checkpoint_activation
# Number of microbatches to run per batch on the device
self._num_microbatches = num_microbatches
def forward(self, *inputs: Any, **_: Any) -> Any:
# `apply` calls the `forward` function of the `OffloadFunction` class
# and the `forward` function calls `inputs` on the first model shard.
# Please see https://pytorch.org/docs/stable/autograd.html#function for more details.
# We need the second param to be a dummy input to enable the
# backward pass to be triggered for integer inputs.
if self._checkpoint_activation:
return OffloadFunction.apply(*inputs, torch.tensor([], requires_grad=True), self)
self._activations = []
for index in range(-1, len(self.model_slices)):
if index >= 0:
# TODO(anj-s): This might be a redundant call since we have the previous
# activation on the device already.
self._activations[index] = tuple([a.cuda() for a in list(self._activations[index])])
inputs = self._activations[index]
inputs = self.model_slices[index](*inputs)
# Call the custom autograd hooks (discard/load slices FW and BW)
inputs = ShardSyncLayer.apply(inputs, index, self.model_slices, self)
self._activations.append(inputs)
if index >= 0:
self._activations[index] = tuple([a.cpu() for a in list(self._activations[index])])
result = self._activations[-1]
result = tuple([r.cuda() for r in result])
return result[0] if len(result) == 1 else result
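# End-to-end usage sketch (illustrative; assumes a CUDA device is available):
#
#   seq = nn.Sequential(*[nn.Linear(32, 32) for _ in range(6)])
#   offload = OffloadModel(seq, device=torch.device("cuda"),
#                          offload_device=torch.device("cpu"), num_slices=3,
#                          checkpoint_activation=True, num_microbatches=2)
#   out = offload(torch.randn(16, 32, device="cuda"))
#   out.sum().backward()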
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import time
from typing import Any, Dict, List, Tuple, Union
import torch
from torch import nn
from torch.autograd.profiler import record_function
from torch.distributed import ProcessGroup
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from fairscale.nn.model_parallel import get_pipeline_parallel_ranks
from fairscale.nn.pipe.async_schedule import (
AsyncMessageBody,
AsyncMessageType,
AsyncRecvOperator,
Location,
ModuleWrapper,
)
from fairscale.nn.pipe.checkpoint import Checkpointing
from fairscale.nn.pipe.messages import Transport
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.types import (
EVENT_LOOP_ACTIVATIONS_QUEUE,
EVENT_LOOP_GRADIENTS_QUEUE,
PipeMessage,
TensorOrTensors,
)
from fairscale.nn.pipe.worker import Task
def create_task_without_skip_trackers(
checkpoint_stop: int,
i: int,
j: int,
batch: Batch,
partition: nn.Sequential,
) -> Task:
# Determine whether checkpointing or not.
if i < checkpoint_stop:
def function(
input: TensorOrTensors,
partition: nn.Sequential = partition,
chunk_id: int = i,
part_id: int = j,
) -> TensorOrTensors:
with record_function("chunk%d-part%d" % (chunk_id, part_id)):
return partition(input)
chk = Checkpointing(function, batch)
task = Task(None, compute=chk.checkpoint, finalize=chk.recompute)
del function, chk
else:
def compute(
batch: Batch = batch,
partition: nn.Sequential = partition,
chunk_id: int = i,
part_id: int = j,
) -> Batch:
with record_function("chunk%d-part%d" % (chunk_id, part_id)):
return batch.call(partition)
task = Task(None, compute=compute, finalize=None)
del compute
return task
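# Sketch of the checkpointing cut-off (illustrative values only): with chunks = 4
# and checkpoint_stop = 2, microbatches i = 0, 1 are wrapped in Checkpointing
# (their activations are recomputed in the backward pass), while i = 2, 3 get a
# plain compute Task that keeps its activations.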
class AsyncAMPnetEventLoop:
def __init__(
self,
partitions: List[ModuleWrapper],
group: ProcessGroup,
transport: Transport,
min_update_interval: int,
weight_prediction: bool,
checkpoint_stop: int,
input_device: Union[None, int, str, torch.device],
chunks: int,
):
self.partitions = partitions
self.group = group
self.transport = transport
self.min_update_interval = min_update_interval
self.weight_prediction = weight_prediction
self.checkpoint_stop = checkpoint_stop
self.input_device = input_device
self.chunks = chunks
def perform_optimizer_step(self, optimizer: Any, num_gradients: Any) -> Any:
return (
(optimizer is not None)
and (not self.weight_prediction and num_gradients % self.min_update_interval == 0)
or (self.weight_prediction and num_gradients % self.chunks == 0)
)
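    # Illustrative reading (assuming `optimizer` is not None):
    #   weight_prediction=False, min_update_interval=4 -> step after gradients 4, 8, 12, ...
    #   weight_prediction=True,  chunks=8              -> step after gradients 8, 16, 24, ...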
def async_send_inner(self, batch: Batch, index: int) -> Tuple[Batch, PipeMessage]:
task = create_task_without_skip_trackers(
self.checkpoint_stop,
index,
self.group.rank(),
batch,
self.partitions[0].module,
)
result = task.compute()
task.finalize(result)
ranks = get_pipeline_parallel_ranks()
this_rank = torch.distributed.get_rank()
body = AsyncMessageBody(
AsyncMessageType.Activations,
index,
Location(this_rank, 0),
Location(ranks[ranks.index(this_rank) + 1], 0),
0,
)
message = PipeMessage(
this_rank,
ranks[ranks.index(this_rank) + 1],
queue_name=EVENT_LOOP_ACTIVATIONS_QUEUE,
args=body,
tensors=tuple([*result]),
)
return result, message
def async_grad_inner(self, message: PipeMessage, activations: Dict[int, Batch]) -> None:
args: AsyncMessageBody = message.args
recvd_grads = self.transport.recv_message_tensors(message)
batch = activations[args.microbatch_index]
if len(recvd_grads.tensors) != len(batch):
raise RuntimeError("different number of tensors and gradients")
grads = []
final_tensors = []
for i, tensor in enumerate(batch):
if tensor.requires_grad or getattr(tensor, "grad_fn", None) is not None:
grads.append(recvd_grads.tensors[i])
final_tensors.append(tensor)
torch.autograd.backward(final_tensors, grad_tensors=grads, retain_graph=True)
del activations[args.microbatch_index]
def get_batch_from_message(self, message: PipeMessage, queue_name: int) -> Batch:
"""Get the tensor(s) wrapped in a `Batch` from a `PipeMessage`, applying
AsyncRecvOperator so we can intercept the backward pass"""
microbatch_index = message.args.microbatch_index
phony = torch.empty(0, device=self.transport.input_device, requires_grad=True)
result = AsyncRecvOperator.apply(phony, self.transport, message, queue_name)
if len(result) == 1:
batch = Batch(result[0], microbatch_index)
else:
batch = Batch(result, microbatch_index)
return batch
def event_loop_head_across_minibatches(
self, lm_dataloader: DataLoader, criterion: nn.Module, optimizer: Optimizer, transform_logger_object: Any
) -> None:
# handles one epoch
cur_rank = self.group.rank()
N = len(get_pipeline_parallel_ranks()) # for warmup phase
activations = dict()
count = 0
num_gradients = 0
lm_iter = iter(lm_dataloader)
# filling the pipeline: warmup -> all N - 1 forward passes
while True:
try:
cur_batch = next(lm_iter)
reqd_input = transform_logger_object.transform_input(cur_batch).to(self.input_device)
batch = Batch(reqd_input, count)
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, count, self.chunks, forward=True)
activations[count], message = self.async_send_inner(batch, count)
self.transport.send_message(message, sync=True)
count += 1
if count == N - 1:
break
except StopIteration:
break
# steady state
while True:
try:
# 1 forward pass
cur_batch = next(lm_iter)
reqd_input = transform_logger_object.transform_input(cur_batch).to(self.input_device)
batch = Batch(reqd_input, count)
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, count, self.chunks, forward=True)
activations[count], forward_message = self.async_send_inner(batch, count)
count += 1
# 1 backward pass
message = self.transport.recv_message_header(EVENT_LOOP_GRADIENTS_QUEUE)
args: AsyncMessageBody = message.args
assert args.message_type is AsyncMessageType.Gradients
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(
cur_rank, N, num_gradients, self.chunks, forward=False
)
self.async_grad_inner(message, activations)
# Send after grad
self.transport.send_message(forward_message, sync=True)
num_gradients += 1
if self.perform_optimizer_step(optimizer, num_gradients):
optimizer.step()
optimizer.zero_grad()
transform_logger_object.check_and_save_weights(num_gradients)
except StopIteration:
break
# remaining items for backward
remaining_items = len(activations)
for _ in range(remaining_items):
message = self.transport.recv_message_header(EVENT_LOOP_GRADIENTS_QUEUE)
args = message.args
assert args.message_type is AsyncMessageType.Gradients
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, num_gradients, self.chunks, forward=False)
self.async_grad_inner(message, activations)
num_gradients += 1
if self.perform_optimizer_step(optimizer, num_gradients):
optimizer.step()
optimizer.zero_grad()
transform_logger_object.check_and_save_weights(num_gradients)
def event_loop_tail_across_minibatches(
self, lm_dataloader: DataLoader, criterion: nn.Module, optimizer: Optimizer, transform_logger_object: Any
) -> None:
# handles one epoch
cur_rank = self.group.rank()
N = len(get_pipeline_parallel_ranks())
num_batches = len(lm_dataloader)
lm_iter = enumerate(lm_dataloader)
# last partition -> one forward / one backward -> no warmup
count = 0
num_gradients = 0
activations = dict()
log_interval = 1
word_counter = 0
total_loss = 0
while True:
try:
start_time = time.time()
microbatch_index, cur_batch = next(lm_iter)
reqd_target = transform_logger_object.transform_target(cur_batch).to(self.input_device)
# one forward
message = self.transport.recv_message_header(EVENT_LOOP_ACTIVATIONS_QUEUE)
args: AsyncMessageBody = message.args
assert args.microbatch_index == count
batch = self.get_batch_from_message(message, EVENT_LOOP_GRADIENTS_QUEUE)
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, count, self.chunks, forward=True)
task = create_task_without_skip_trackers(
self.checkpoint_stop,
args.microbatch_index,
self.group.rank(),
batch,
self.partitions[0].module,
)
output = task.compute()
activations[args.microbatch_index] = output
task.finalize(output)
# one backward
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(
cur_rank, N, num_gradients, self.chunks, forward=False
)
output_tensor = transform_logger_object.transform_output_before_loss(output.tensor)
loss = criterion(output_tensor, reqd_target)
loss.backward()
count += 1
num_gradients += 1
if self.perform_optimizer_step(optimizer, num_gradients):
optimizer.step()
optimizer.zero_grad()
transform_logger_object.check_and_save_weights(num_gradients)
transform_logger_object.log_loss(cur_batch, loss, count)
del loss
del activations[args.microbatch_index]
except StopIteration:
break
def event_loop_trunk_forward_helper(self, activations: Dict[int, Batch]) -> PipeMessage:
message = self.transport.recv_message_header(EVENT_LOOP_ACTIVATIONS_QUEUE)
args: AsyncMessageBody = message.args
assert args.message_type is AsyncMessageType.Activations
batch = self.get_batch_from_message(message, EVENT_LOOP_GRADIENTS_QUEUE)
activations[args.microbatch_index], message = self.async_send_inner(batch, args.microbatch_index)
return message
def event_loop_trunk_backward_helper(self, activations: Dict[int, Batch]) -> None:
message = self.transport.recv_message_header(EVENT_LOOP_GRADIENTS_QUEUE)
args: AsyncMessageBody = message.args
assert args.message_type is AsyncMessageType.Gradients
self.async_grad_inner(message, activations)
def event_loop_across_minibatches(
self, lm_dataloader: DataLoader, criterion: nn.Module, optimizer: Optimizer, transform_logger_object: Any
) -> None:
activations: Dict[int, Batch] = dict()
num_microbatch = len(lm_dataloader)
num_activations = 0
num_gradients = 0
ranks = get_pipeline_parallel_ranks() # for warmup phase
N = len(ranks)
cur_rank = torch.distributed.get_rank()
# warmup phase (forward passes)
# cur_rank worker will do (max_rank - cur_rank) forward passes
n_warmup = ranks[-1] - cur_rank
for _ in range(n_warmup):
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(
cur_rank, N, num_activations, self.chunks, forward=True
)
message = self.event_loop_trunk_forward_helper(activations)
self.transport.send_message(message, sync=True)
num_activations += 1
        # common loop for remaining items in the warmup phase and steady phase
while num_activations < num_microbatch:
# 1 Forward
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(
cur_rank, N, num_activations, self.chunks, forward=True
)
message = self.event_loop_trunk_forward_helper(activations)
num_activations += 1
# 1 Backward
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, num_gradients, self.chunks, forward=False)
self.event_loop_trunk_backward_helper(activations)
num_gradients += 1
if self.perform_optimizer_step(optimizer, num_gradients):
optimizer.step()
optimizer.zero_grad()
transform_logger_object.check_and_save_weights(num_gradients)
self.transport.send_message(message, sync=True)
# remaining backwards
remaining = len(activations)
for _ in range(remaining):
if self.weight_prediction:
optimizer.update_weight_using_future_predictions(cur_rank, N, num_gradients, self.chunks, forward=False)
self.event_loop_trunk_backward_helper(activations)
num_gradients += 1
if self.perform_optimizer_step(optimizer, num_gradients):
optimizer.step()
optimizer.zero_grad()
transform_logger_object.check_and_save_weights(num_gradients)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""The AMPnetPipe interface."""
from typing import Any
from torch import nn
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from fairscale.nn.pipe import AsyncPipe
from .ampnet import AsyncAMPnetEventLoop
__all__ = ["AMPnetPipe"]
class AMPnetPipe(AsyncPipe):
"""
AMPnetPipe is the asynchronous version of the MultiProcessPipe implementation
    which avoids the bubble issue by using stale weights and gradients.
The implementation closely follows the paper: https://arxiv.org/abs/1705.09786
"""
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
def interleave(
self,
lm_dataloader: DataLoader,
criterion: nn.Module,
optimizer: Optimizer,
transform_logger_object: Any,
min_update_interval: int = 1,
weight_prediction: bool = False,
) -> None:
partitions = self.partitions
n = len(partitions)
# AMPnet implementation doesn't handle skip_trackers!
assert self.group
rank = self.group.rank()
transport = self.pipeline.transport
checkpoint_stop = self.pipeline.checkpoint_stop
ampnet_event_loop = AsyncAMPnetEventLoop(
partitions,
self.group,
transport,
min_update_interval,
weight_prediction,
checkpoint_stop,
self.input_device,
self.chunks,
)
if rank == 0:
ampnet_event_loop.event_loop_head_across_minibatches(
lm_dataloader, criterion, optimizer, transform_logger_object
)
elif self.final_stage:
ampnet_event_loop.event_loop_tail_across_minibatches(
lm_dataloader, criterion, optimizer, transform_logger_object
)
else:
ampnet_event_loop.event_loop_across_minibatches(
lm_dataloader, criterion, optimizer, transform_logger_object
)
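# Usage sketch (hypothetical: the AsyncPipe construction arguments and the
# transform_logger_object interface are assumed here, not defined by this file):
#
#   pipe = AMPnetPipe(module=model, balance=balance, worker_map=worker_map, chunks=4)
#   pipe.interleave(lm_dataloader, criterion, optimizer, my_logger,
#                   min_update_interval=1, weight_prediction=False)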
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .gossip import SlowMoBaseAlgorithm, SlowMoDistributedDataParallel # noqa
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Mixing Manager Class
:description: Class provides an API for dynamically selecting mixing weights
for gossip
"""
from abc import ABC, abstractmethod
from typing import Dict, Optional, Union
import torch
from .graph_manager import GraphManager
class MixingManager(ABC):
def __init__(self, graph: GraphManager, device: Optional[torch.device]) -> None:
self.graph_manager = graph
self.device = device
def is_regular(self) -> bool:
"""
        Whether there is bias accumulated in the local entry of the stationary
        distribution of the mixing matrix
"""
return self.graph_manager.is_regular_graph() and self.is_uniform()
@abstractmethod
def is_uniform(self) -> bool:
"""Whether mixing weights are distributed uniformly over peers"""
raise NotImplementedError
@abstractmethod
def get_mixing_weights(self, residual_adjusted: bool = True) -> Dict[Union[str, int], torch.Tensor]:
"""Create mixing weight dictionary using uniform allocation"""
raise NotImplementedError
class UniformMixing(MixingManager):
def get_mixing_weights(self, residual_adjusted: bool = True) -> Dict[Union[str, int], torch.Tensor]:
"""Create mixing weight dictionary using uniform allocation"""
mixing_weights: Dict[Union[str, int], torch.Tensor] = {}
out_peers, _ = self.graph_manager.get_peers()
w = torch.tensor([1.0 / (len(out_peers) + 1.0)], device=self.device)
mixing_weights["lo"] = w.clone()
w_op = w if not residual_adjusted else w / mixing_weights["lo"]
mixing_weights["uniform"] = w_op.clone()
for op in out_peers:
mixing_weights[op] = w_op.clone()
return mixing_weights
def is_uniform(self) -> bool:
return True
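# Worked example (illustrative): with 3 out-peers, w = 1 / (3 + 1) = 0.25, so
# mixing_weights["lo"] = 0.25; with residual_adjusted=True the outgoing weight
# becomes w / mixing_weights["lo"] = 1.0 for "uniform" and for every out-peer.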
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .distributed import SlowMoBaseAlgorithm, SlowMoDistributedDataParallel
from .gossiper import PushPull, PushSum
from .graph_manager import (
DynamicBipartiteExponentialGraph,
DynamicBipartiteLinearGraph,
DynamicDirectedExponentialGraph,
DynamicDirectedLinearGraph,
GraphManager,
NPeerDynamicDirectedExponentialGraph,
RingGraph,
)
from .mixing_manager import MixingManager, UniformMixing
from .utils import communicate
from .utils.cuda_metering import CudaEventRecorder
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Distributed Gossip Wrapper
:description: Multi-Threaded Gossip Model Wrapper; designed for efficient
multi-peer training.
"""
from enum import Enum
import functools
import logging
import os
import sys
import threading
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
import torch
from torch.autograd import Variable
import torch.distributed as dist
from torch.nn.modules import Module
from .gossiper import Gossiper, PushPull, PushSum
from .graph_manager import GraphManager
from .graph_manager import NPeerDynamicDirectedExponentialGraph as NPDDEGraph
from .mixing_manager import MixingManager, UniformMixing
from .utils import (
MultiProcessAdapter,
communicate,
create_process_group,
flatten_tensors,
group_by_dtype,
make_logger,
unflatten_tensors,
)
from .utils.cuda_metering import EventRecorder, create_event_recorder
HEARTBEAT_TIMEOUT = 300 # maximum time to wait for message (seconds)
BROADCAST_BUCKET_SIZE = 10 * 1024 * 1024
class SlowMoBaseAlgorithm(str, Enum):
LOCALSGD = "localsgd"
SGP = "sgp"
class SlowMoDistributedDataParallel(Module):
"""Wraps an arbitrary :class:`nn.Module <torch.nn.Module>` module and allows
it to be run on multiple GPUs (distributed) in a data parallel setting.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. After the optimizer update,
it synchronizes the parameters on the different nodes using SlowMo
(https://arxiv.org/abs/1910.00643).
    Please make sure to read the documentation for the slowmo_memory_efficient parameter, as
it contains a non-trivial trick in order to optimize our implementation.
Please refer to the documentation of ``torch.nn.parallel.DistributedDataParallel``
for other useful tips for using this container.
Parameters:
module (Module):
module to be parallelized
nprocs_per_node (int):
Number of processes per node (one per GPU). This needs to be specified for optimal accuracy and speed.
Syncing across GPUs in a node is extremely fast, which we utilize for performance optimization
broadcast_buffers (bool):
Flag that enables syncing (broadcasting) buffers (example - batchnorm buffers) of the module at beginning
of the ``forward`` function. Setting it to False would result in better performance due to less
communication on the network but might result in a reduced accuracy (default: ``True``)
slowmo_base_algorithm (SlowMoBaseAlgorithm):
The base algorithm to be used for approximately averaging the different parameters across nodes. The base
algorithm is responsible for increasing the efficiency of this module. The base algorithm, combined with
SlowMo, results in significant speedups without accuracy loss. Either Stochastic Gradient Push
(SlowMoBaseAlgorithm.SGP) (https://arxiv.org/abs/1811.10792) or LocalSGD (SlowMoBaseAlgorithm.LOCALSGD)
(https://arxiv.org/abs/1808.07217) can be used here (default: SlowMoBaseAlgorithm.LOCALSGD)
SlowMo Parameters:
slowmo_momentum (float):
This specifies the value of slowmo momentum to be used (read https://arxiv.org/abs/1910.00643 for more
details). This parameter might need to be tuned and the optimal value varies according to the use case and
the number of nodes being run on. The optimal value typically increases with the number of nodes. On
            training transformers on the WMT 16 En-De dataset, we have found the optimal values to be 0 for fewer than 4
nodes, 0.2 for 4 nodes, 0.5 for 8 nodes and 0.6 for 16 nodes (default: 0.5)
slowmo_memory_efficient (bool):
If enabled, use a memory efficient implementation of SlowMo. The basic implementation of SlowMo occupies
extra memory equal to double the memory occupied by the model parameters. The memory efficient
implementation shards that memory across a certain number of shards which is specified as a parameter
below.
In addition, slowmo_memory_efficient leads to extra communication with throughput equivalent to an
allreduce, and performs an allreduce as a side-effect. In order to optimize the implementation, we skip
the typical allreduce when slowmo_base_algorithm is localsgd and the localsgd step and slowmo step occur
on the same iteration. Also, we skip the gossip step when slowmo_base_algorithm is sgp. We can skip these
because the memory-efficient slowmo step does an allreduce as a side effect. Due to this skipping, when
slowmo_base_algorithm is localsgd, we recommend setting slowmo_frequency to be a multiple of
localsgd_frequency.
            We recommend setting this parameter to True when slowmo_base_algorithm is localsgd. In the case of sgp,
            there is a tradeoff between extra memory usage (double the memory occupied by the parameters) and extra
            time spent (half the time taken by an allreduce every slowmo_frequency iterations), and we suggest
            setting it to False (default: True)
slowmo_frequency (int):
This specifies how often (number of iterations) slow momentum is to be performed. We recommend keeping
slowmo_frequency as a multiple of localsgd_frequency. Please look at the documentation of
slowmo_memory_efficient for the reasoning (default: 48)
slowmo_lr (float):
This specifies the value of slowmo learning rate to be used (read https://arxiv.org/abs/1910.00643 for
more details). We do not recommend changing this (default: 1.0)
slowmo_num_shards (int):
The number of shards between which slow momentum parameters are distributed. This is only used when
            slowmo_memory_efficient is set to True.
The number of shards should scale with the number of parameters in the model. Increasing the number of
shards decreases the memory used per node for storing the slow momentum parameters. However, if the shard
size per node is too small, it results in a communication overhead (default: 32)
LocalSGD Parameters:
localsgd_frequency (int):
            LocalSGD typically averages the parameters once every few iterations. This parameter specifies the
frequency of averaging. We recommend keeping slowmo_frequency as a multiple of localsgd_frequency. Please
look at the documentation of slowmo_memory_efficient for the reasoning (default: 3)
SGP Parameters:
        graph (Optional[GraphManager]):
Graph to be used for gossip communication. This is used to specify the interaction graph between the
different nodes (default: None)
mixing (Optional[MixingManager]):
Mixing manager to be used for gossip communication. This is used to specify weights given to outgoing and
incoming messages (default: None)
push_sum (bool):
Whether to use PushSum or PushPull gossip (default: True)
overlap (bool):
Whether to use the overlap form of SGP. This feature is currently disabled until further testing is done
for its use (default: False)
synch_freq (int):
How often (number of iterations) to synchronize for overlap SGP. A value of 0 means to synchronize overlap
SGP every iteration (default: 0)
use_streams (bool):
Whether to use CUDA streams to speed up SGP overlap (default: True)
slowmo_sgp_average_params (bool):
Whether to completely average the parameters when slowmo is done instead of a partial averaging that
happens every iteration (default: False)
Debugging Parameters:
verbose (bool):
Prints various logs which are useful for debugging (default: False)
profile_mode (bool):
Prints the time taken by different parts of the code, which can help in finding bottlenecks (default: False)
Parameters for Advanced Users:
process_rank (Optional[int]):
Rank of the current process in the process group (default: None)
process_world_size (Optional[int]):
Size of the process group (default: None)
global_group (Optional[torch.distributed.ProcessGroup]):
Global process group initialized by init_process_group (default: None)
master_group (Optional[torch.distributed.ProcessGroup]):
Process group which only contains the master GPUs of each node (default: None)
local_node_group (Optional[torch.distributed.ProcessGroup]):
Process group which only contains the GPUs local to the current node (default: None)
comm_device: (Optional[torch.device]):
The torch.device on which torch tensors are to be placed before communication (default: None)
Example:
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = fairscale.data_parallel.SlowMoDistributedDataParallel(model, nprocs_per_node=8)
>>> loss = criterion(net(inputs), targets)
>>> loss.backward()
>>> optimizer.step()
>>> net.perform_slowmo(optimizer)
"""
def __init__(
self,
module: torch.nn.Module,
nprocs_per_node: int,
broadcast_buffers: bool = True,
slowmo_base_algorithm: SlowMoBaseAlgorithm = SlowMoBaseAlgorithm.LOCALSGD,
# SlowMo Args
slowmo_momentum: float = 0.5,
slowmo_memory_efficient: bool = True,
slowmo_frequency: int = 48,
slowmo_lr: float = 1.0,
slowmo_num_shards: int = 32,
# LocalSGD Args
localsgd_frequency: int = 3,
# SGP Args
graph: Optional[GraphManager] = None,
mixing: Optional[MixingManager] = None,
push_sum: bool = True,
overlap: bool = False,
synch_freq: int = 0,
use_streams: bool = True,
slowmo_sgp_average_params: bool = False,
# Debugging Args
verbose: bool = False,
profile_mode: bool = False,
# Args for advanced users (these are automatically handled otherwise)
process_rank: Optional[int] = None,
process_world_size: Optional[int] = None,
global_group: Optional[torch.distributed.ProcessGroup] = None,
master_group: Optional[torch.distributed.ProcessGroup] = None,
local_node_group: Optional[torch.distributed.ProcessGroup] = None,
comm_device: Optional[torch.device] = None,
) -> None:
super(SlowMoDistributedDataParallel, self).__init__()
# NCCL_BLOCKING_WAIT causes issues with using multiple process groups
assert os.environ.get("NCCL_BLOCKING_WAIT", "0") == "0"
assert nprocs_per_node >= 1
self.nprocs_per_node = nprocs_per_node
if process_world_size is None or process_rank is None:
assert dist.is_initialized()
process_rank = dist.get_rank()
process_world_size = dist.get_world_size()
assert process_world_size is not None and process_rank is not None
self.process_rank = process_rank
self.process_world_size = process_world_size
self._initialize_logger(verbose, self.process_rank)
# The logical prefix in the following variables denotes the variable value if nprocs_per_node processes
# were treated as one process and then the following variables were calculated for the resulting process
# group. This is how they are being treated for optimization purposes because intra-node communication is
# very efficient with NVLink.
logical_rank, logical_world_size = self._maybe_create_process_groups(
self.process_rank, self.process_world_size, nprocs_per_node, global_group, master_group, local_node_group
)
self.logical_rank = logical_rank
self.logical_world_size = logical_world_size
self.module = module
self.broadcast_buffers = broadcast_buffers
first_param_dtype = next(self.module.parameters()).dtype
# prepare local intra-node all-reduce objects
self.broadcast_bucket_size = BROADCAST_BUCKET_SIZE # bytes
self.module_buffers = list(self.module.buffers())
# choose communication device based on backend
if comm_device is None:
cpu_comm = dist.get_backend() == "gloo"
comm_device = torch.device("cpu") if cpu_comm else torch.device("cuda")
self._cpu_comm = comm_device.type == "cpu"
# distributed backend config
self.dist_config = {
"verbose": verbose,
"comm_device": comm_device,
"logical_rank": logical_rank,
"process_rank": self.process_rank,
"logical_world_size": logical_world_size,
"cpu_comm": self._cpu_comm,
}
self.profile_mode = profile_mode
self.num_updates = 0
self.portion_start: Optional[int] = None
# slowmo being set to False is equivalent to slowmo_lr being set to 1 and slowmo_momentum being set to 0
# This condition is ensuring the values are safe to use even when slowmo is disabled
self.slowmo = slowmo_lr != 1 or slowmo_momentum != 0
self.slowmo_lr = slowmo_lr if self.slowmo else 1
self.slowmo_momentum = slowmo_momentum if self.slowmo else 0
self.slowmo_frequency = slowmo_frequency
self.slowmo_sgp_average_params = slowmo_sgp_average_params
self.localsgd = slowmo_base_algorithm == SlowMoBaseAlgorithm.LOCALSGD
self.sgp = slowmo_base_algorithm == SlowMoBaseAlgorithm.SGP
self.localsgd_frequency = localsgd_frequency
self.ef1: Optional[List[torch.Tensor]] = None
self.global_momentum_buffers_initialized = False
if self.master_group is None:
assert self.localsgd or self.sgp
self.localsgd = self.sgp = False
self.logger.warning("Disabling LocalSGD and SGP since a local allreduce will suffice")
if self.slowmo and not self.localsgd and not self.sgp:
self.logger.warning("SlowMo is being used without LocalSGD and SGP")
self.slowmo_memory_efficient = slowmo_memory_efficient
self.slowmo_num_shards = min(self.process_world_size, slowmo_num_shards) if self.slowmo_memory_efficient else 1
self.is_current_node_a_slowmo_shard = (
self.process_rank < self.slowmo_num_shards if self.slowmo_memory_efficient else True
)
self.nprocs_per_node_device = torch.tensor([self.nprocs_per_node], device=comm_device, dtype=first_param_dtype)
if self.sgp:
self._sgp_init(
module=module,
first_param_dtype=first_param_dtype,
logical_rank=logical_rank,
logical_world_size=logical_world_size,
comm_device=comm_device,
graph=graph,
mixing=mixing,
push_sum=push_sum,
overlap=overlap,
synch_freq=synch_freq,
use_streams=use_streams,
slowmo_sgp_average_params=slowmo_sgp_average_params,
)
# register ps/grad-reduction hooks
self._register_hooks()
self.logger.debug("Initialization of SlowMoDistributedDataParallel complete")
def _initialize_logger(self, verbose: bool, process_rank: int) -> None:
"""Initializes the logger"""
self.logger = logging.getLogger(__name__)
if verbose:
self.logger.setLevel(logging.DEBUG)
# Only create an adapter if debug logging is enabled to avoid additional overhead
if self.logger.isEnabledFor(logging.DEBUG):
# Set custom adapter on top of logger
self.logger = cast(logging.Logger, MultiProcessAdapter(self.logger, {"process_num": process_rank}))
def _maybe_create_process_groups(
self,
process_rank: int,
process_world_size: int,
nprocs_per_node: int,
global_group: Optional[torch.distributed.ProcessGroup],
master_group: Optional[torch.distributed.ProcessGroup],
local_node_group: Optional[torch.distributed.ProcessGroup],
) -> Tuple[int, int]:
"""Creates the process groups required for the SlowMo implementation"""
self.local_rank = process_rank % self.nprocs_per_node
assert (
process_world_size % self.nprocs_per_node == 0
) # total world size must be a multiple of `nprocs_per_node`
logical_world_size = process_world_size // self.nprocs_per_node
logical_rank = process_rank // self.nprocs_per_node
self._maybe_initialize_global_group(global_group, process_world_size)
self._maybe_initialize_local_node_group(local_node_group, process_rank, logical_world_size)
self._maybe_initialize_master_group(master_group, process_rank, process_world_size, nprocs_per_node)
self.logger.debug("Initialization of all process groups complete")
return logical_rank, logical_world_size
def _maybe_initialize_global_group(
self, global_group: Optional[torch.distributed.ProcessGroup], process_world_size: int
) -> None:
if global_group is None:
all_processes = list(range(process_world_size))
self.global_group = create_process_group(all_processes)
self.logger.debug("Initialization of global group complete")
else:
self.global_group = global_group
self.logger.debug("Global group set")
self.process_group = self.global_group
def _maybe_initialize_master_group(
self,
master_group: Optional[torch.distributed.ProcessGroup],
process_rank: int,
process_world_size: int,
nprocs_per_node: int,
) -> None:
if master_group is not None:
self.master_group: Optional[torch.distributed.ProcessGroup] = master_group
return
if self.nprocs_per_node > 1:
self.logger.debug("Initializing master process group")
master_nodes = [i for i in range(process_world_size) if i % nprocs_per_node == 0]
self.master_group = create_process_group(master_nodes) if len(master_nodes) > 1 else None
if self.master_group is not None and process_rank in master_nodes:
self.logger.debug("Initialization of master group complete")
else:
self.master_group = self.global_group
def _maybe_initialize_local_node_group(
self, local_node_group: Optional[torch.distributed.ProcessGroup], process_rank: int, logical_world_size: int
) -> None:
if self.nprocs_per_node == 1:
self.local_node_group = None
return
if local_node_group is not None:
self.local_node_group = local_node_group
return
self.logger.debug("Initializing local process groups")
for node in range(logical_world_size):
node_processes_ranks = list(
range(
node * self.nprocs_per_node,
(node + 1) * self.nprocs_per_node,
)
)
# Process group to communicate between processes on this machine
new_local_group = create_process_group(node_processes_ranks)
if process_rank in node_processes_ranks:
self.local_node_group = new_local_group
assert self.local_node_group is not None
self.logger.debug("Initialization of local groups complete")
def forward(self, *inputs: Any, **kwargs: Any) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Forward pass performed in parallel across all devices on node"""
return self.module(*inputs, **kwargs)
def _sync_params(self) -> None:
"""Synchronize parameters across devices (intra-node)"""
if self.local_node_group is None:
return
# intra-node parameter sync
params = cast(List[torch.Tensor], list(self.module.parameters()))
communication_op = functools.partial(
dist.broadcast,
src=self.logical_rank * self.nprocs_per_node,
group=self.local_node_group,
)
communicate(params, communication_op)
self.logger.debug("Intra-node param sync complete")
def _sync_buffers(self) -> None:
"""Synchronize buffers across nodes"""
# module buffer sync
if self.broadcast_buffers and len(self.module_buffers) > 0:
# Synchronize buffers across processes.
# The process with rank 0 is considered the authoritative copy.
self._distributed_broadcast_coalesced(self.process_group, self.module_buffers, self.broadcast_bucket_size)
self.logger.debug("Intra-node buffer sync complete")
def _distributed_broadcast_coalesced(
self, process_group: torch.distributed.ProcessGroup, tensors: List[torch.Tensor], buffer_size: int
) -> None:
dist._broadcast_coalesced(process_group, tensors, buffer_size)
def _create_event_recorder(self, event_name: str) -> EventRecorder:
"""Creates an cuda event recorder which helps in profiling"""
return create_event_recorder(event_name, dummy=not self.profile_mode)
def _fp16_fp32_iterator(
self, optimizer: torch.optim.Optimizer, fp32_params: Optional[torch.Tensor]
) -> Iterable[Tuple[torch.Tensor, torch.Tensor]]:
"""Iterator for those fp16 parameters which have a fp32 copy"""
# Handle apex fp16 optimizer
if hasattr(optimizer, "_amp_stash") and hasattr(optimizer._amp_stash, "fp16_groups"):
for p_fp16_group, p_fp32_group in zip(
optimizer._amp_stash.fp16_groups,
optimizer._amp_stash.fp32_from_fp16_groups,
):
for p_fp16, p_fp32 in zip(p_fp16_group, p_fp32_group):
yield p_fp16, p_fp32
# Handle fairseq fp16 optimizer
elif fp32_params is not None:
if isinstance(fp32_params, dict):
fp32_params_list = list(fp32_params.values())
assert len(fp32_params_list) == 1
fp32_params = fp32_params_list[0]
if isinstance(fp32_params, list):
for p, fp32_param in zip(self.parameters(), fp32_params):
yield p.view(-1), fp32_param
else:
offset = 0
for p in self.parameters():
yield p.view(-1), fp32_params[offset : offset + p.numel()]
offset += p.numel()
def _should_perform_slowmo(self) -> bool:
return self.slowmo and (self.num_updates + 1) % self.slowmo_frequency == 0
def _should_perform_localsgd(self) -> bool:
return self.localsgd and (self.num_updates + 1) % self.localsgd_frequency == 0
def _skip_averaging_memory_efficient_slowmo(self) -> bool:
return self.slowmo_memory_efficient and self._should_perform_slowmo()
def _should_perform_sgp_common(self) -> bool:
return self.sgp and not self.overlap and not self._skip_averaging_memory_efficient_slowmo()
def _should_perform_sgp(self) -> bool:
return self._should_perform_sgp_common() and not self.overlap
def _should_perform_sgp_overlap(self) -> bool:
return self._should_perform_sgp_common() and self.overlap
def _should_use_error_feedback(self, fp16_fp32_list: List[Tuple[torch.Tensor, torch.Tensor]]) -> bool:
return bool(fp16_fp32_list) and (self._should_perform_sgp() or self._should_allreduce_params())
def _should_allreduce_params(self) -> bool:
# We do not all-reduce parameters with local SGD if a slow momentum step is
# performed, since this step contains a reduce operation already. Note that this
# also means there is no error feedback correction in that case: it is not needed
# since communication within the slow momentum step happens in fp32.
return (self.sgp and self._should_perform_slowmo() and self.slowmo_sgp_average_params) or (
self._should_perform_localsgd() and not self._skip_averaging_memory_efficient_slowmo()
)
def _maybe_pre_communicate_error_feedback(self, fp16_fp32_list: List[Tuple[torch.Tensor, torch.Tensor]]) -> None:
ef_rec = self._create_event_recorder("Error feedback")
if self._should_use_error_feedback(fp16_fp32_list):
with torch.no_grad():
for p_fp16, p_fp32 in fp16_fp32_list:
if self._should_allreduce_params():
# This division and multiplication with the same number is done
# to ensure that we do not lose bits of information when we divide
# before the all_reduce. In order to preserve these bits in an
# error feedback (https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.1050.5040&rep=rep1&type=pdf)
# like manner, we are forcing the bits to be lost
# initially, and storing the lost information in error feedback
p_fp16.div_(self.logical_world_size)
p_fp16.mul_(self.logical_world_size)
p_fp32 -= p_fp16.float()
if self.ef1 is not None:
for idx, (_, p_fp32) in enumerate(fp16_fp32_list):
p_fp32 += self.ef1[idx]
p_fp32.div_(2)
ef_rec.stop()
self.logger.debug("Error feedback completed")
def _maybe_post_communicate_error_feedback(self, fp16_fp32_list: List[Tuple[torch.Tensor, torch.Tensor]]) -> None:
ef_unroll_rec = self._create_event_recorder("Sync and error feedback unroll rec")
if self._should_use_error_feedback(fp16_fp32_list):
# Error Feedback Reversal
with torch.no_grad():
for p, p_fp32 in fp16_fp32_list:
p_fp32 += p.float()
ef_unroll_rec.stop()
self.logger.debug("Error feedback unroll completed")
def _maybe_perform_sgp(self) -> None:
sgp_rec = self._create_event_recorder("SGP")
if self._should_perform_sgp():
if not self._should_allreduce_params():
self._sgp_transfer_params()
self._sgp_query_gossip_queue()
torch.cuda.synchronize()
self.logger.debug("SGP completed")
sgp_rec.stop()
def _maybe_allreduce(self) -> None:
localsgd_rec = self._create_event_recorder("Localsgd communication time")
if self._should_allreduce_params():
communication_op = functools.partial(dist.all_reduce, group=self.master_group)
params = cast(List[torch.Tensor], list(self.parameters()))
with torch.no_grad():
for p in params:
p.div_(self.logical_world_size)
self.logger.debug("Params normalized before localsgd step")
# Commenting this out as it may cause an overhead. Can be uncommented if needed
# synch_rec = self._create_event_recorder("Synchronization time for localsgd")
# dist.barrier()
# synch_rec.stop()
# self.logger.debug("Barrier completed before localsgd step")
communicate(params, communication_op, self.logger)
torch.cuda.synchronize()
self.logger.debug("Allreduce completed")
localsgd_rec.stop()
def _maybe_sync_locally(self) -> None:
if self._should_perform_sgp() or self._should_allreduce_params():
self._sync_params()
torch.cuda.synchronize()
def _maybe_perform_slowmo(self, optimizer: torch.optim.Optimizer) -> None:
slowmo_rec = self._create_event_recorder("Slowmo")
if self._should_perform_slowmo():
self._global_momentum_step(optimizer)
slowmo_rec.stop()
self.logger.debug("Global momentum step completed")
def _maybe_copy_back_fp32_parameters(self, fp16_fp32_list: List[Tuple[torch.Tensor, torch.Tensor]]) -> None:
ef_copy_rec = self._create_event_recorder("Error feedback copy back")
if (
self._should_perform_sgp() or self._should_allreduce_params() or self._should_perform_slowmo()
) and fp16_fp32_list:
with torch.no_grad():
for idx, (p_fp16, p_fp32) in enumerate(fp16_fp32_list):
p_fp16.copy_(p_fp32)
ef_copy_rec.stop()
self.logger.debug("Error feedback copy-back completed")
def _maybe_sgp_overlap_pre_communicate_error_feedback(
self, fp16_fp32_list: List[Tuple[torch.Tensor, torch.Tensor]]
) -> None:
if self._should_perform_sgp_overlap() and fp16_fp32_list:
# Initialize error feedback for SGP-overlap
if self.ef1 is None:
self.ef1 = [p_fp32.clone().detach_() for _, p_fp32 in fp16_fp32_list]
with torch.no_grad():
assert self.ef1 is not None
for ef1, (p_fp16, p_fp32) in zip(self.ef1, fp16_fp32_list):
ef1.copy_(p_fp32 - p_fp16.float())
def perform_slowmo(self, optimizer: torch.optim.Optimizer, fp32_params: Optional[torch.Tensor] = None) -> None:
"""This is to be called after optimizer.step(). It performs the approximate averaging using
the base algorithm (SGP/ LocalSGD) and the slow momentum step. Since LocalSGD and the slow
momentum step are not performed every iteration, it only performs those when needed.
It is recommended to call ``model.zero_grad(set_to_none=True)`` just before calling this function. This
is because ``model.zero_grad(set_to_none=True)`` frees up the memory occupied by the gradients, some of which
may be reused by this function.
Args:
optimizer (torch.optim.Optimizer): The optimizer being used for training the model
            fp32_params (Optional[torch.Tensor]): To be used when performing fp16 training. Needs to be
                set to the fp32 copy of the parameters (default: None)
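
        Example (a hypothetical training loop; ``model``, ``optimizer`` and ``criterion`` are
        placeholder names for the wrapped SlowMoDistributedDataParallel instance, its optimizer
        and the loss function, none of which are defined by this method):

            >>> loss = criterion(model(inputs), targets)
            >>> loss.backward()
            >>> optimizer.step()
            >>> model.zero_grad(set_to_none=True)
            >>> model.perform_slowmo(optimizer)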
"""
# Done here in case the global momentum buffers have not been initialized by the caller.
# In an ideal implementation, this would be called by the caller. We do it here instead of
# waiting for it to happen in the global_momentum step function so that we store a copy of
# the version of the parameters at iteration 0 and can use them for a slow momentum step later.
if not self.global_momentum_buffers_initialized:
self._init_global_momentum_buffers(optimizer)
fp16_fp32_list = list(self._fp16_fp32_iterator(optimizer, fp32_params))
self.logger.debug("Created a list of fp16 and fp32 corresponding parameters")
self.logger.debug(
"Booleans set. Values - self._should_perform_slowmo()=%r, self._should_perform_localsgd()=%r, self._should_allreduce_params()=%r",
self._should_perform_slowmo(),
self._should_perform_localsgd(),
self._should_allreduce_params(),
)
self.logger.debug("Step number(0-indexed)=%d", self.num_updates)
if (
self.num_updates == 0
and fp32_params is None
and not hasattr(optimizer, "_amp_stash")
and any(p.dtype == torch.float16 for p in self.parameters())
):
self.logger.warning("WARNING: please set fp32_params in perform_slowmo() in order to avoid accuracy loss")
self._maybe_pre_communicate_error_feedback(fp16_fp32_list)
self._maybe_perform_sgp()
self._maybe_allreduce()
self._maybe_sync_locally()
self._maybe_post_communicate_error_feedback(fp16_fp32_list)
self._maybe_perform_slowmo(optimizer)
self._maybe_copy_back_fp32_parameters(fp16_fp32_list)
self._maybe_sgp_overlap_pre_communicate_error_feedback(fp16_fp32_list)
self.num_updates += 1
def _init_global_momentum_buffers(self, optimizer: torch.optim.Optimizer) -> None:
"""Initializes the slow momentum buffers"""
self.global_momentum_buffers_initialized = True
if not self.slowmo:
return
total_elements = 0
params_dtype = None
for group in optimizer.param_groups:
for p in group["params"]:
total_elements += p.numel()
# Assert that all parameters have the same device and dtype
if params_dtype is None:
params_dtype, params_device = p.dtype, p.device
                # Check that dtype is fp32 since slow momentum is to be performed in fp32
assert p.dtype == params_dtype == torch.float32
assert p.device == params_device
self.world_portion_length = (total_elements + self.slowmo_num_shards - 1) // self.slowmo_num_shards
if not self.is_current_node_a_slowmo_shard:
return
self.portion_start = self.process_rank * self.world_portion_length if self.slowmo_memory_efficient else 0
self.portion_end = (
min((self.process_rank + 1) * self.world_portion_length, total_elements)
if self.slowmo_memory_efficient
else total_elements
)
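        # For example (hypothetical numbers): with total_elements = 10 and slowmo_num_shards = 4,
        # world_portion_length = 3, so shard 2 owns elements [6, 9) and shard 3 owns [9, 10).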
self.old_params = torch.empty(self.world_portion_length, dtype=params_dtype).to(params_device).detach()
# copy params to old_params to initialize old_params
offset = 0
for group in optimizer.param_groups:
for p in group["params"]:
numel = p.numel()
if offset + numel > self.portion_start and offset < self.portion_end:
                    # compute the overlap between this parameter and the local shard
overall_start = max(self.portion_start, offset)
overall_end = min(self.portion_end, offset + numel)
p_start = overall_start - offset
p_end = overall_end - offset
buffer_start = overall_start - self.portion_start
buffer_end = overall_end - self.portion_start
                    # slice out the part of p that falls inside this node's shard
current_p = p.view(-1)[p_start:p_end]
current_p_old = self.old_params[buffer_start:buffer_end]
current_p_old.copy_(current_p)
offset += numel
self.global_momentum_buffer = torch.zeros_like(self.old_params).detach()
def _distributed_comm(self, optimizer: torch.optim.Optimizer, mode: str) -> None:
"""Performs the communication needed for the efficient SlowMo implementation"""
offset = 0
slowmo_comm_lists: List[List[torch.Tensor]] = [[] for _ in range(self.slowmo_num_shards)]
with torch.no_grad():
for group in optimizer.param_groups:
# aggregate different parts of p in required node
for p in group["params"]:
numel = p.numel()
# gather has a reduce operation so division by world size is needed
if mode == "gather":
p /= self.process_world_size
current_start = offset
while current_start < offset + numel:
main_node = current_start // self.world_portion_length
main_node_end = (main_node + 1) * self.world_portion_length
current_end = min(offset + numel, main_node_end)
p_start = current_start - offset
p_end = current_end - offset
slowmo_comm_lists[main_node].append(p.view(-1)[p_start:p_end])
current_start = current_end
offset += numel
for slowmo_rank, slowmo_comm_list in enumerate(slowmo_comm_lists):
if mode == "gather":
communication_op = functools.partial(dist.reduce, dst=slowmo_rank)
elif mode == "scatter":
communication_op = functools.partial(dist.broadcast, src=slowmo_rank)
communicate(slowmo_comm_list, communication_op)
def _global_momentum_step(self, optimizer: torch.optim.Optimizer) -> None:
"""Performs the slow momentum step"""
if not self.slowmo:
return
if not self.global_momentum_buffers_initialized:
self._init_global_momentum_buffers(optimizer)
if self.slowmo_memory_efficient:
self._distributed_comm(optimizer, mode="gather")
if self.is_current_node_a_slowmo_shard:
self._perform_local_optimization(optimizer)
if self.slowmo_memory_efficient:
self._distributed_comm(optimizer, mode="scatter")
def _perform_local_optimization(self, optimizer: torch.optim.Optimizer) -> None:
"""Performs the slow momentum on the local shard"""
assert self.portion_start is not None
with torch.no_grad():
offset = 0
for group in optimizer.param_groups:
# perform local slowmo for p
for p in group["params"]:
numel = p.numel()
if offset + numel > self.portion_start and offset < self.portion_end:
                        # compute the overlap between this parameter and the local shard
overall_start = max(self.portion_start, offset)
overall_end = min(self.portion_end, offset + numel)
p_start = overall_start - offset
p_end = overall_end - offset
buffer_start = overall_start - self.portion_start
buffer_end = overall_end - self.portion_start
                            # slice out the part of p that falls inside this node's shard
current_p = p.view(-1)[p_start:p_end]
current_p_gmb = self.global_momentum_buffer[buffer_start:buffer_end]
current_p_old = self.old_params[buffer_start:buffer_end]
current_p_gmb.mul_(self.slowmo_momentum).sub_(current_p, alpha=1 / group["lr"]).add_(
current_p_old, alpha=1 / group["lr"]
)
current_p_old.add_(current_p_gmb, alpha=-group["lr"] * self.slowmo_lr) # type: ignore
current_p.copy_(current_p_old)
offset += numel
def _register_hooks(self) -> None:
"""
Registers push-sum de-bias/bias hooks in pre-forward/post-backward
passes in all leaf modules
"""
self.register_forward_pre_hook(self.__make_forward_pre_hook())
self.register_backward_hook(self.__make_backward_hook())
def __make_backward_hook(self) -> Callable[..., None]:
self.logger.debug("making backward hook")
def hook(*unused: Any) -> None:
# reduce gradients across devices on a single machine
if self.local_node_group is not None:
grads = []
for p in self.module.parameters():
if not p.requires_grad or p.grad is None:
continue
p.grad.div_(self.nprocs_per_node)
grads.append(p.grad)
self.logger.debug("Gradients ready for syncing")
communication_op = functools.partial(dist.all_reduce, group=self.local_node_group)
communicate(grads, communication_op, self.logger)
self.logger.debug("Gradient sync during backward pass in local_group complete")
if self.sgp:
# convert model back to ps-numerator
self._sgp_ps_numerator()
# gossip during training (not inference)
if self.gossip_enable and self.overlap and not self._skip_averaging_memory_efficient_slowmo():
self._sgp_query_gossip_queue()
def queue_hook(*unused: Any) -> None:
Variable._execution_engine.queue_callback(hook)
return queue_hook
def __make_forward_pre_hook(self) -> Callable[..., None]:
self.logger.debug("making forward pre-hook")
def hook(*unused: Any) -> None:
"""Query gossip queue and de-bias during forward pass"""
# sync buffers before the forward pass
self._sync_buffers()
# gossip during training (not inference)
if self.sgp:
if self.gossip_enable and self.overlap and not self._skip_averaging_memory_efficient_slowmo():
self._sgp_transfer_params()
# convert model to de-biased estimate
self._sgp_unbias()
return hook
# SGP related functions
def _sgp_init(
self,
module: torch.nn.Module,
first_param_dtype: torch.dtype,
logical_rank: int,
logical_world_size: int,
comm_device: Optional[torch.device] = None,
graph: Optional[GraphManager] = None,
mixing: Optional[MixingManager] = None,
push_sum: bool = True,
overlap: bool = False,
synch_freq: int = 0,
use_streams: bool = True,
slowmo_sgp_average_params: bool = False,
) -> None:
"""Perform initialization for Stochastic Gradient Push base algorithm"""
if graph is None:
graph = NPDDEGraph(logical_rank, logical_world_size, self.nprocs_per_node, self.local_rank)
if mixing is None:
mixing = UniformMixing(graph, comm_device)
self.dist_config.update({"graph": graph, "mixing": mixing, "push_sum": push_sum})
self.overlap = overlap
assert not self.overlap # currently disabled, see docstring
self.synch_freq = synch_freq
self.asynch = synch_freq > 0
# push-sum weight=1.0 ==> distributed averaging
self.ps_weight = torch.ones(1, device=comm_device, dtype=first_param_dtype)
self.is_sgp_ps_numerator = False
self.gossip_enable = True
self.gossiping = False
self.params_mixed = True
self.gossip_ps_factor = torch.zeros(1, device=comm_device, dtype=first_param_dtype)
self.gossip_ps_weight = self.ps_weight.clone()
self.gossip_params = []
self.gossip_device_buffer = []
for p in module.parameters():
cp = cast(torch.nn.Parameter, p.clone().detach_())
cp = cast(torch.nn.Parameter, cp.cpu().pin_memory() if self._cpu_comm else cp.cuda())
self.gossip_params.append(cp)
self.gossip_device_buffer.append(cp)
# prepare gossip process control objects
self.gossip_lock = threading.Lock()
self.gossip_flag = threading.Event()
self.train_flag = threading.Event()
if cast(torch.device, self.dist_config["comm_device"]).type != "cpu" and use_streams:
self.gossip_stream = torch.cuda.Stream()
else:
self.gossip_stream = torch.cuda.current_stream()
if self.process_rank % self.nprocs_per_node == 0:
self.gossip_thread = threading.Thread(
target=SlowMoDistributedDataParallel._sgp_gossip_target,
args=(
self.dist_config,
self.gossip_flag,
self.train_flag,
self.gossip_lock,
self.gossip_params,
self.gossip_device_buffer,
self.gossip_ps_weight,
self.gossip_ps_factor,
self.gossip_stream,
),
)
self.gossip_thread.daemon = True
self.gossip_thread.name = "Gossip-Thread"
self.gossip_thread.start()
else:
self.gossip_flag.set()
# wait for thread to complete initialization
self.gossip_flag.wait()
self.gossip_flag.clear()
# lazy mixing avoids additional bias/de-bias steps
self.lazy_mixing = not self.asynch and cast(MixingManager, self.dist_config["mixing"]).is_regular()
self.lazy_ps_factor = self.gossip_ps_factor.clone()
self.logger.debug("lazy mixing: %r", self.lazy_mixing)
def state_dict(self) -> Dict[str, Union[torch.Tensor, bool]]: # type: ignore
state_dict = super(SlowMoDistributedDataParallel, self).state_dict()
if self.sgp:
state_dict["ps_weight"] = self.ps_weight.cpu()
state_dict["is_sgp_ps_numerator"] = self.is_sgp_ps_numerator # type: ignore
return state_dict # type: ignore
def load_state_dict(self, state_dict: Dict[str, Union[torch.Tensor, bool]]) -> None: # type: ignore
if self.sgp:
assert isinstance(state_dict, dict)
self.ps_weight = cast(torch.Tensor, state_dict.pop("ps_weight")).to(
device=cast(torch.device, self.dist_config["comm_device"])
)
self.is_sgp_ps_numerator = cast(bool, state_dict.pop("is_sgp_ps_numerator"))
super(SlowMoDistributedDataParallel, self).load_state_dict(cast(Dict[str, torch.Tensor], state_dict))
def _sgp_ps_numerator(self) -> None:
"""Convert model params to ps-numerator"""
if not self.is_sgp_ps_numerator:
if not self.lazy_mixing:
ps_weight = self.ps_weight
with torch.no_grad():
for p in self.module.parameters():
p.mul_(cast(torch.Tensor, ps_weight.type(p.dtype)))
self.is_sgp_ps_numerator = True
def _sgp_unbias(self) -> None:
"""Convert model params to de-biased estimate"""
if self.is_sgp_ps_numerator:
if not self.lazy_mixing:
ps_weight = self.ps_weight
with torch.no_grad():
for p in self.module.parameters():
p.div_(cast(torch.Tensor, ps_weight.type(p.dtype))) # type: ignore
self.is_sgp_ps_numerator = False
def train(self, mode: bool = True) -> "SlowMoDistributedDataParallel":
super(SlowMoDistributedDataParallel, self).train(mode)
if self.sgp:
self.gossip_enable = True
return self
def eval(self) -> "SlowMoDistributedDataParallel":
super(SlowMoDistributedDataParallel, self).eval()
if self.sgp:
self.gossip_enable = False
self._sgp_query_gossip_queue(non_blocking=self.asynch)
return self
def _sgp_query_gossip_queue(self, non_blocking: bool = False) -> bool:
"""Check gossip-queue for push-sum residuals and update model"""
if not self.gossip_enable:
return False
self.logger.debug("querying gossip queue")
# no gossip happening right now so just return
if not self.gossiping:
if self.process_rank % self.nprocs_per_node == 0:
self.logger.warning("not gossiping right now")
return False
if not non_blocking and not self.gossip_flag.wait(timeout=HEARTBEAT_TIMEOUT):
raise RuntimeError("Gossip flag timeout")
sys.exit() # HEARTBEAT monitor
# query gossip thread
if self.gossip_flag.is_set():
self.logger.debug("received gossip flag")
# atomic gossip was interrupted so try again
if self.gossip_ps_weight[0] == -1:
self.gossip_flag.clear()
self.params_mixed = True
self.gossiping = False
self._sgp_transfer_params(mix=False)
return False
self.lazy_ps_factor.copy_(self.gossip_ps_factor)
# convert model-params to ps numerators b4 adding residuals
self._sgp_ps_numerator()
# add residuals
self.ps_weight += self.gossip_ps_weight
if self.lazy_mixing:
self.ps_weight *= self.lazy_ps_factor
with torch.no_grad():
for p, r in zip(self.module.parameters(), self.gossip_device_buffer):
p.add_(r) # type: ignore
if self.lazy_mixing:
p.mul_(cast(torch.Tensor, self.lazy_ps_factor.type(p.dtype)))
# update flags
self.logger.debug("updated ps-weight %f", self.ps_weight)
self.logger.debug("updated model params")
self.gossip_flag.clear()
self.params_mixed = True
self.gossiping = False
return True
return False
def _sgp_transfer_params(self, mix: bool = True) -> bool:
"""Transfers COPY of model parameters to gossip queue"""
if not self.gossip_enable or self.process_rank % self.nprocs_per_node != 0:
return False
self.logger.debug("transferring model params")
# don't transfer new params if old params haven't been mixed yet
if not self.params_mixed:
self.logger.warning("params not mixed")
return False
# using lazy mixing ==> mix on query not transfer
mix = mix and not self.lazy_mixing
# Transfer ps-numerators to gossip-process:
# --
self._sgp_ps_numerator()
if mix:
self.ps_weight *= self.gossip_ps_factor
self.gossip_ps_weight.copy_(self.ps_weight)
# --
# params gpu-gpu copy (fast)
# --
with torch.no_grad():
for p, gossip_device_buffer_elem in zip(self.module.parameters(), self.gossip_device_buffer):
if mix:
p.mul_(cast(torch.Tensor, self.gossip_ps_factor.type(p.dtype)))
gossip_device_buffer_elem.copy_(p)
# --
# buffer to gossip-thread copy (potentially slow, but asynchronous)
# --
self.gossip_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.gossip_stream):
for b, gp in zip(self.gossip_device_buffer, self.gossip_params):
gp.copy_(b, non_blocking=True)
# --
# update flags
self.logger.debug("transferred model params")
self.params_mixed = False
self.gossiping = True
self.train_flag.set()
return True
@staticmethod
def _sgp_gossip_into_receive_buffer(
send_buffer: List[torch.Tensor],
gossiper: Gossiper,
receive_buffer: List[torch.Tensor],
gossip_ps_weight: torch.Tensor,
gossip_lock: threading.Lock,
dist_config: Dict[Any, Any],
) -> Tuple[torch.Tensor, torch.Tensor]:
# flatten parameters before sending
out_msg = flatten_tensors(send_buffer)
# send and receive parameters
with gossip_lock:
in_msg, ps_weight = gossiper.mix(out_msg, gossip_ps_weight)
ps_factor = gossiper.mixing_weights["lo"]
# unflatten parameters
with torch.no_grad():
for r, g in zip(unflatten_tensors(in_msg, send_buffer), receive_buffer):
if dist_config["cpu_comm"]:
g.copy_(r, non_blocking=True)
else:
g.copy_(r)
return ps_weight, ps_factor
@staticmethod
def _sgp_gossip_target(
dist_config: Dict[Any, Any],
gossip_flag: threading.Event,
train_flag: threading.Event,
gossip_lock: threading.Lock,
gossip_params: List[torch.Tensor],
gossip_device_buffer: List[torch.Tensor],
gossip_ps_weight: torch.Tensor,
gossip_ps_factor: torch.Tensor,
gossip_stream: torch.cuda.Stream,
) -> None:
"""Gossip thread, which performs push-sum on model params"""
logger = make_logger(dist_config["logical_rank"], dist_config["verbose"])
gossip_params_by_dtype = group_by_dtype(gossip_params)
gossip_device_buffer_by_dtype = group_by_dtype(gossip_device_buffer)
gossipers = {}
# init gossip instance
gossiper_class = PushSum if dist_config["push_sum"] else PushPull
for dtype in gossip_params_by_dtype:
gossipers[dtype] = gossiper_class(
flatten_tensors(gossip_params_by_dtype[dtype]),
device=cast(torch.device, dist_config["comm_device"]),
graph=cast(GraphManager, dist_config["graph"]),
mixing=cast(MixingManager, dist_config["mixing"]),
rank=dist_config["process_rank"],
world_size=dist_config["logical_world_size"],
logger=logger,
)
dist_config["gossipers"] = gossipers
gossip_ps_factor.copy_(gossipers[list(gossipers)[0]].mixing_weights["lo"])
gossip_flag.set()
# gossip loop
while True:
train_flag.wait()
logger.debug("received train-flag")
try:
with torch.cuda.stream(gossip_stream):
for dtype in gossip_params_by_dtype:
(ps_weight, ps_factor,) = SlowMoDistributedDataParallel._sgp_gossip_into_receive_buffer(
gossip_params_by_dtype[dtype],
gossipers[dtype],
gossip_device_buffer_by_dtype[dtype],
gossip_ps_weight,
gossip_lock,
dist_config,
)
gossip_ps_weight.copy_(ps_weight)
gossip_ps_factor.copy_(ps_factor)
except RuntimeError as e:
logger.warning("received runtime error {}".format(e))
for gossiper in gossipers.values():
gossiper.clean_msg_buffers_()
gossip_ps_weight.fill_(-1)
finally:
# Make sure all queued operations are complete
gossip_stream.synchronize()
# give main thread go-ahead to read our gossip buffer
train_flag.clear()
gossip_flag.set()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Graph Manager Class
:description: Class provides an API for loading different peer-to-peer
communication topologies, and cycling through peers.
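
Example (hypothetical; assumes torch.distributed is already initialized and CUDA is
    available, since building a graph creates and warms up process groups for its edges):

    >>> graph = DynamicDirectedExponentialGraph(rank=0, world_size=4)
    >>> out_peers, in_peers = graph.get_peers(rotate=True)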
"""
from abc import ABC, abstractmethod
from math import log as mlog
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
class Edge(object):
def __init__(self, local_master_rank: int, dest: int, src: int, local_rank: int) -> None:
self.src = src
self.dest = dest
self.process_group = dist.new_group([src, dest])
if local_master_rank in [self.src, self.dest] and local_rank == 0:
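            # These two all-reduces over dummy fp32/fp16 tensors are, presumably, warm-up calls
            # that force eager initialization of the newly created process group's communicator.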
initializer_tensor = torch.Tensor([1]).cuda()
dist.all_reduce(initializer_tensor, group=self.process_group)
initializer_tensor = torch.Tensor([1]).cuda().half()
dist.all_reduce(initializer_tensor, group=self.process_group)
class GraphManager(ABC):
def __init__(
self, rank: int, world_size: int, nprocs_per_node: int = 1, local_rank: int = 0, peers_per_itr: int = 1
) -> None:
assert int(peers_per_itr) >= 1
self.rank = rank
self.world_size = world_size
self.phone_book: List[List[Edge]] = [[] for _ in range(self.world_size)]
self._peers_per_itr = peers_per_itr
self._group_indices = list(range(peers_per_itr))
self.nprocs_per_node = nprocs_per_node
self.local_rank = local_rank
self._make_graph()
@property
def peers_per_itr(self) -> int:
return self._peers_per_itr
@peers_per_itr.setter
def peers_per_itr(self, v: int) -> None:
self._peers_per_itr = v
# set group-indices attr. --- point to out-peers in phone-book
self._group_indices = list(range(v))
@abstractmethod
def _make_graph(self) -> None:
"""
        Builds the phone-book: a nested list of peers in which the outer list is indexed by rank,
        and the inner list denotes the set of peers that 'rank' can send
        messages to at any point in time
"""
raise NotImplementedError
def _add_peers(self, rank: int, peers: List[int]) -> None:
for peer in peers:
if peer not in self.phone_book[rank]:
self.phone_book[rank].append(
Edge(
local_master_rank=(self.rank * self.nprocs_per_node),
dest=(peer * self.nprocs_per_node),
src=(rank * self.nprocs_per_node),
local_rank=self.local_rank,
)
)
@abstractmethod
def is_regular_graph(self) -> bool:
"""Whether each node has the same number of in-peers as out-peers"""
raise NotImplementedError
@abstractmethod
def is_bipartite_graph(self) -> bool:
"""Whether graph is bipartite or not"""
raise NotImplementedError
@abstractmethod
def is_passive(self, rank: Optional[int] = None) -> bool:
"""Whether 'rank' is a passive node or not"""
raise NotImplementedError
@abstractmethod
def is_dynamic_graph(self) -> bool:
"""Whether the graph-type is dynamic (as opposed to static)"""
raise NotImplementedError
def get_peers(self, rotate: bool = False) -> Tuple[List[int], List[int]]:
"""Returns the out and in-peers corresponding to 'self.rank'"""
# cycle through in- and out-peers by updating group-index
if rotate:
self._rotate_group_indices()
# get out- and in-peers using new group-indices
out_peers, in_peers = [], []
for group_index in self._group_indices:
out_peers.append(self.phone_book[self.rank][group_index].dest)
for rank, peers in enumerate(self.phone_book):
if rank == self.rank:
continue
if self.rank * self.nprocs_per_node == peers[group_index].dest:
in_peers.append(rank)
return out_peers, in_peers
def get_edges(self, rotate: bool = False) -> Tuple[List[Edge], List[Edge]]:
"""Returns the pairwise process groups between rank and the out and
in-peers corresponding to 'self.rank'"""
# cycle through in- and out-peers by updating group-index
if rotate:
self._rotate_group_indices()
# get out- and in-peers using new group-indices
out_edges, in_edges = [], []
for group_index in self._group_indices:
out_edges.append(self.phone_book[self.rank][group_index])
for rank, edges in enumerate(self.phone_book):
if rank == self.rank:
continue
if self.rank * self.nprocs_per_node == edges[group_index].dest:
in_edges.append(self.phone_book[rank][group_index])
return out_edges, in_edges
def _rotate_group_indices(self) -> None:
"""Incerement group indices to point to the next out-peer"""
increment = self.peers_per_itr
for i, group_index in enumerate(self._group_indices):
self._group_indices[i] = int((group_index + increment) % len(self.phone_book[self.rank]))
def _rotate_forward(self, r: int, p: int) -> int:
"""Helper function returns peer that is p hops ahead of r"""
return (r + p) % self.world_size
def _rotate_backward(self, r: int, p: int) -> int:
"""Helper function returns peer that is p hops behind r"""
return (r - p) % self.world_size
class DynamicDirectedExponentialGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
for i in range(0, int(mlog(self.world_size - 1, 2)) + 1):
f_peer = self._rotate_forward(rank, 2**i)
b_peer = self._rotate_backward(rank, 2**i)
self._add_peers(rank, [f_peer, b_peer])
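        # For example (hypothetical): with world_size = 8, rank 0 is connected to ranks 1, 2 and 4
        # going forward and ranks 7, 6 and 4 going backward (hops of 2**i for i = 0, 1, 2).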
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return False
def is_passive(self, rank: Optional[int] = None) -> bool:
return False
def is_dynamic_graph(self) -> bool:
return True
class NPeerDynamicDirectedExponentialGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
for i in range(0, int(mlog(self.world_size - 1, self._peers_per_itr + 1)) + 1):
for j in range(1, self._peers_per_itr + 1):
distance_to_neighbor = j * ((self._peers_per_itr + 1) ** i)
f_peer = self._rotate_forward(rank, distance_to_neighbor)
self._add_peers(rank, [f_peer])
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return False
def is_passive(self, rank: Optional[int] = None) -> bool:
return False
def is_dynamic_graph(self) -> bool:
return True
class DynamicBipartiteExponentialGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
for i in range(0, int(mlog(self.world_size - 1, 2)) + 1):
if i == 0:
f_peer = self._rotate_forward(rank, 1)
b_peer = self._rotate_backward(rank, 1)
else:
f_peer = self._rotate_forward(rank, 1 + 2**i)
b_peer = self._rotate_backward(rank, 1 + 2**i)
# create directory for non-passive peers
if not self.is_passive(rank) and (self.is_passive(f_peer) and self.is_passive(b_peer)):
self._add_peers(rank, [f_peer, b_peer])
# create directory for passive peers
elif self.is_passive(rank) and (not (self.is_passive(f_peer) or self.is_passive(b_peer))):
self._add_peers(rank, [f_peer, b_peer])
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return True
def is_passive(self, rank: Optional[int] = None) -> bool:
rank = self.rank if rank is None else rank
return (rank % 2) == 0
def is_dynamic_graph(self) -> bool:
return True
class DynamicDirectedLinearGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
for i in range(1, self.world_size):
if i % 2 == 0:
continue
f_peer = self._rotate_forward(rank, i)
b_peer = self._rotate_backward(rank, i)
self._add_peers(rank, [f_peer, b_peer])
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return False
def is_passive(self, rank: Optional[int] = None) -> bool:
return False
def is_dynamic_graph(self) -> bool:
return True
class DynamicBipartiteLinearGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
for i in range(1, self.world_size):
f_peer = self._rotate_forward(rank, i)
b_peer = self._rotate_backward(rank, i)
# create directory for non-passive peers
if not self.is_passive(rank) and (self.is_passive(f_peer) and self.is_passive(b_peer)):
self._add_peers(rank, [f_peer, b_peer])
# create directory for passive peers
elif self.is_passive(rank) and (not (self.is_passive(f_peer) or self.is_passive(b_peer))):
self._add_peers(rank, [f_peer, b_peer])
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return True
def is_passive(self, rank: Optional[int] = None) -> bool:
rank = self.rank if rank is None else rank
return (rank % 2) == 0
def is_dynamic_graph(self) -> bool:
return True
class RingGraph(GraphManager):
def _make_graph(self) -> None:
for rank in range(self.world_size):
f_peer = self._rotate_forward(rank, 1)
b_peer = self._rotate_backward(rank, 1)
self._add_peers(rank, [f_peer, b_peer])
def is_regular_graph(self) -> bool:
return True
def is_bipartite_graph(self) -> bool:
return False
def is_passive(self, rank: Optional[int] = None) -> bool:
return False
def is_dynamic_graph(self) -> bool:
return False
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Gossipers
:description: Gossipers are designed for multi-peer communication (i.e., send
              and recv from multiple peers at each iteration)
"""
from enum import Enum
import logging
from typing import Iterator, List, Optional, Tuple, cast
import torch
import torch.distributed as dist
from .graph_manager import GraphManager
from .mixing_manager import MixingManager, UniformMixing
class dist_backend(str, Enum):
UNDEFINED = "undefined"
TCP = "tcp"
MPI = "mpi"
GLOO = "gloo"
NCCL = "nccl"
class Gossiper(object):
"""Generic gossip averaging object for multi-peer communication
Args:
msg (torch.Tensor): message used to initialize recv buffer
graph (GraphManager): Subclass of GraphManager
device: (torch.Device) device on which to initialize recv buffer
mixing (MixingManager): Subclass of MixingManager
logger (logging.Logger): Module used to log results
rank (int): Rank of the current process
world_size (int): World size of the current process
"""
def __init__(
self,
msg: torch.Tensor,
graph: GraphManager,
device: Optional[torch.device] = None,
        mixing: Optional[MixingManager] = None,
        logger: Optional[logging.Logger] = None,
rank: Optional[int] = None,
world_size: Optional[int] = None,
) -> None:
"""
Initialize generic averaging class designed for multi-peer comms
"""
self.logger = logger
if rank is None or world_size is None:
assert dist.is_initialized()
# for now p2p communication only supported with tcp and mpi
assert dist.get_backend() != dist_backend.GLOO
assert dist.get_backend() != dist_backend.NCCL
rank = dist.get_rank()
world_size = dist.get_world_size()
# graph topology properties
self.rank = rank
self.world_size = world_size
assert isinstance(graph, GraphManager)
self._graph_manager = graph
self.peers_per_itr_device = torch.tensor([self._graph_manager.peers_per_itr], device=device, dtype=msg.dtype)
# This might need to be made float16 later on
self.passive = self._graph_manager.is_passive()
self.refresh_peers_(rotate=False) # sets in- and out-peers attributes
# mixing matrix
if mixing is None:
mixing = UniformMixing(self._graph_manager, device)
assert isinstance(mixing, MixingManager)
self._mixing_manager = mixing
self.refresh_mixing_weights_() # sets mixing-weights attribute
# regular ==> we don't need to keep track of ps-weight explicitly
self.regular = self._mixing_manager.is_regular()
# msg buffers used during send/recv
self.device = device if device is not None else msg.device
self.out_msg_buffer: List[Tuple[dist.Work, torch.Tensor]] = []
self.in_msg_buffer = msg.clone().detach_().to(self.device)
self._ps_weight: torch.Tensor = torch.ones(1, dtype=msg.dtype).detach_().to(self.device)
# not using regular comms ==> need to communicate ps-weight
if not self.regular:
self.in_msg_buffer = torch.cat([self.in_msg_buffer, self.ps_weight])
if self.device.type == "cpu":
try:
self.in_msg_buffer = self.in_msg_buffer.pin_memory()
except Exception as e:
if self.logger is not None:
self.logger.error(e)
else:
raise
self.placeholder = self.in_msg_buffer.clone()
@property
def ps_weight(self) -> torch.Tensor:
return self._ps_weight
@ps_weight.setter
def ps_weight(self, v: torch.Tensor) -> None:
self._ps_weight.data[0] = v
@property
def peers_per_itr(self) -> int:
return self._graph_manager.peers_per_itr
@peers_per_itr.setter
def peers_per_itr(self, v: int) -> None:
self._graph_manager.peers_per_itr = v
def refresh_peers_(self, rotate: Optional[bool] = None) -> None:
"""Update in- and out-peers"""
if rotate is None:
rotate = self._graph_manager.is_dynamic_graph()
# cannot cycle peers in a static graph
assert not (rotate and not self._graph_manager.is_dynamic_graph())
self.out_edges, self.in_edges = self._graph_manager.get_edges(rotate)
def refresh_mixing_weights_(self, residual_adjusted: bool = False) -> None:
"""Update mixing-matrix weights"""
self.mixing_weights = self._mixing_manager.get_mixing_weights(residual_adjusted)
def mix_out_msg_(self, out_msg: torch.Tensor, ps_weight: torch.Tensor) -> Iterator[torch.Tensor]:
"""Returns a generator mixing messages on the fly"""
self.refresh_mixing_weights_(residual_adjusted=True)
self.ps_weight = ps_weight
# check whether or not we need to communicate ps_weight
if not self.regular:
out_msg = torch.cat([out_msg, cast(torch.Tensor, self.ps_weight.type(out_msg.dtype))])
# check whether or not we need to create a buffer for each out-msg
if self._mixing_manager.is_uniform():
weight = self.mixing_weights["uniform"]
out_msg *= weight.type(out_msg.dtype)
for _ in self.out_edges:
yield out_msg
else:
for out_edge in self.out_edges:
weight = self.mixing_weights[out_edge.dest]
yield out_msg.mul(weight.type(out_msg.dtype)) # type: ignore
def clean_msg_buffers_(self) -> None:
"""Clean outgoing message buffer"""
while len(self.out_msg_buffer) > 0:
req, msg = self.out_msg_buffer.pop()
req.wait()
msg.set_()
def parse_in_msg_buffer(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Parse in-msg buffer and return msg and ps-weight separately"""
msg = self.in_msg_buffer
if not self.regular:
return msg.narrow(0, 0, len(msg) - 1), msg[-1]
else:
return msg, self.ps_weight * self.peers_per_itr_device
def mix(self, out_msg: torch.Tensor, ps_weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Single gossip step"""
raise NotImplementedError
class PushSum(Gossiper):
"""1-peer Push-Sum consensus averaging module"""
def mix(self, out_msg: torch.Tensor, ps_weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Consensus averaging step"""
# out_msg must be on the correct device
assert out_msg.device.type == self.device.type
if self.logger is not None:
self.logger.debug("in/out -peers {}/{}".format(self.in_edges, self.out_edges))
# prepare messages for gossip
mixed_out_msgs = self.mix_out_msg_(out_msg, ps_weight)
# non-blocking send
for out_edge in self.out_edges:
msg = next(mixed_out_msgs)
assert self.rank == out_edge.src
req = dist.broadcast(
tensor=msg,
src=out_edge.src,
group=out_edge.process_group,
async_op=True,
)
self.out_msg_buffer.append((req, msg))
# blocking recv w/ some code optimization to avoid buffer prep overhead
if len(self.in_edges) == 1:
in_edge = self.in_edges[0]
dist.broadcast(tensor=self.in_msg_buffer, src=in_edge.src, group=in_edge.process_group)
# regular non-blocking recv
else:
# prepare in-msg buffer
self.in_msg_buffer.zero_()
for in_edge in self.in_edges:
dist.broadcast(
tensor=self.placeholder,
src=in_edge.src,
group=in_edge.process_group,
)
self.in_msg_buffer.add_(self.placeholder) # type: ignore
self.refresh_peers_()
self.clean_msg_buffers_()
return self.parse_in_msg_buffer()
class PushPull(Gossiper):
"""Doubly-stochastic consensus averaging module"""
def mix(self, out_msg: torch.Tensor, ps_weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# out_msg must be on the correct device
assert out_msg.device.type == self.device.type
if self.logger is not None:
self.logger.debug("in/out -peers {}/{}".format(self.in_edges, self.out_edges))
# prepare messages for gossip
mixed_out_msgs = self.mix_out_msg_(out_msg, ps_weight)
# send-recv w/ some code optimization to avoid buffer prep overhead
if len(self.in_edges) == 1 and len(self.out_edges) == 1:
out_edge, in_edge = self.out_edges[0], self.in_edges[0]
msg = next(mixed_out_msgs)
if not self.passive:
dist.broadcast(tensor=msg, src=out_edge.src, group=out_edge.process_group)
dist.broadcast(
tensor=self.in_msg_buffer,
src=in_edge.src,
group=in_edge.process_group,
)
else:
dist.broadcast(
tensor=self.in_msg_buffer,
src=in_edge.src,
group=in_edge.process_group,
)
dist.broadcast(tensor=msg, src=out_edge.src, group=out_edge.process_group)
# regular send-recv
else:
# prepare in-msg buffer
self.in_msg_buffer.zero_()
# send-recv
for out_edge, in_edge in zip(self.out_edges, self.in_edges):
msg = next(mixed_out_msgs)
if not self.passive:
dist.broadcast(tensor=msg, src=out_edge.src, group=out_edge.process_group)
dist.broadcast(
tensor=self.placeholder,
src=in_edge.src,
group=in_edge.process_group,
)
else:
dist.broadcast(
tensor=self.placeholder,
src=in_edge.src,
group=in_edge.process_group,
)
dist.broadcast(tensor=msg, src=out_edge.src, group=out_edge.process_group)
self.in_msg_buffer.add_(self.placeholder) # type: ignore
self.refresh_peers_()
self.clean_msg_buffers_()
return self.parse_in_msg_buffer()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .helpers import (
MultiProcessAdapter,
communicate,
create_process_group,
flatten_tensors,
group_by_dtype,
make_logger,
unflatten_tensors,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Benchmarking utils for timing cuda executions
"""
from collections import defaultdict, deque
from functools import partial
import statistics
from typing import ClassVar, Deque, Dict, Optional
import torch
MAX_LEN_DEQUEUE = 10**4
deque_with_max_len_fixed = partial(deque, maxlen=MAX_LEN_DEQUEUE)
def create_and_record_event() -> torch.cuda.Event:
event = torch.cuda.Event(enable_timing=True)
event.record()
return event
class EventRecorder(object):
def stop(self) -> None:
pass
def create_event_recorder(event_name: str, dummy: bool = False) -> EventRecorder:
if not dummy:
return CudaEventRecorder(event_name)
return DummyCudaEventRecorder()
class CudaEventRecorder(EventRecorder):
"""Allows profiling in an easy-to-use manner. CudaEventRecorder can be used
in a loop. When it is used in a loop (or when an event recorder is created
multiple times with the same name), get_timings returns the statistics of the
timings since the last reset. Note: in case the number of timings is greater than
10,000, only the last 10,000 timings are used to calculate the statistics.
Usage:
>>> event_recorder1 = CudaEventRecorder('1')
>>> # Sequence of events whose time is to be measured
>>> event_recorder1.stop()
>>> event_recorder2 = CudaEventRecorder('2')
>>> # Sequence of events whose time is to be measured
>>> event_recorder2.stop()
>>> print(CudaEventRecorder.get_timings())
Args:
event_name (str): The name by which the cuda event is to be referred later on
"""
event_recorders: ClassVar[Dict[str, Deque["CudaEventRecorder"]]] = defaultdict(deque_with_max_len_fixed) # type: ignore
all_event_recorders: ClassVar[Dict[str, Deque["CudaEventRecorder"]]] = defaultdict(deque_with_max_len_fixed) # type: ignore
def __init__(self, event_name: str) -> None:
self.event_name = event_name
self.start_event = create_and_record_event()
self.end_event: Optional[torch.cuda.Event] = None
# Adding it to global tracker
CudaEventRecorder.event_recorders[event_name].append(self)
CudaEventRecorder.all_event_recorders[event_name].append(self)
def stop(self) -> None:
self.end_event = create_and_record_event()
def find_time_elapsed(self) -> float:
if self.end_event is None:
            raise Exception(f"stop() was not called for event with name {self.event_name}")
self.end_event.synchronize()
return self.start_event.elapsed_time(self.end_event)
@classmethod
def reset(cls) -> None:
cls.event_recorders = defaultdict(deque_with_max_len_fixed) # type: ignore
@classmethod
def get_common_timings(cls, event_recorders: Dict[str, Deque["CudaEventRecorder"]], description: str) -> str:
all_timings_str = f"{description}:\n"
# Iterating over different types of events, eg., forward, backward
for event_name, event_recorder_list in event_recorders.items():
            # Iterating over different occurrences of an event type
time_taken_list = [event_recorder.find_time_elapsed() for event_recorder in event_recorder_list]
all_timings_str += ("{}: Time taken: avg: {}, std: {}, count: " "{}\n").format(
event_name,
statistics.mean(time_taken_list),
statistics.pstdev(time_taken_list),
len(time_taken_list),
)
return all_timings_str
@classmethod
def get_timings(cls) -> str:
"""Returns the timings since last reset was called"""
return cls.get_common_timings(cls.event_recorders, "Timings since last reset")
@classmethod
def get_all_timings(cls) -> str:
"""Returns the statistics of all the timings"""
return cls.get_common_timings(cls.all_event_recorders, "All timings")
class DummyCudaEventRecorder(EventRecorder):
pass
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
Collection of commonly used utility functions
"""
import collections
import logging
import sys
from typing import Any, Dict, List, MutableMapping, Optional, Set, Tuple
import torch
import torch.distributed as dist
def flatten_tensors(tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually
Args:
tensors (Iterable[Tensor]): dense tensors to flatten
Returns:
A 1D buffer containing input tensors
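    Example (a minimal sketch):
        >>> flatten_tensors([torch.zeros(2, 2), torch.ones(3)]).shape
        torch.Size([7])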
"""
if len(tensors) == 1:
return tensors[0].view(-1).clone()
flat = torch.cat([t.view(-1) for t in tensors], dim=0)
return flat
def unflatten_tensors(flat: torch.Tensor, tensors: List[torch.Tensor]) -> List[torch.Tensor]:
"""
View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by flatten_tensors.
Args:
flat (Tensor): flattened dense tensors to unflatten
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat
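    Example (a minimal sketch):
        >>> flat = torch.arange(7.0)
        >>> [t.shape for t in unflatten_tensors(flat, [torch.zeros(2, 2), torch.zeros(3)])]
        [torch.Size([2, 2]), torch.Size([3])]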
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return outputs
def group_by_dtype(tensors: List[torch.Tensor]) -> Dict[torch.dtype, List[torch.Tensor]]:
"""
Returns a dict mapping from the tensor dtype to a list containing all
tensors of that dtype.
Arg:
tensors (Iterable[Tensor]): list of tensors
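    Example (a minimal sketch):
        >>> groups = group_by_dtype([torch.zeros(2), torch.zeros(2, dtype=torch.float16)])
        >>> sorted(str(dtype) for dtype in groups)
        ['torch.float16', 'torch.float32']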
"""
tensors_by_dtype = collections.defaultdict(list)
for tensor in tensors:
tensors_by_dtype[tensor.dtype].append(tensor)
return tensors_by_dtype
def communicate(tensors: List[torch.Tensor], communication_op: Any, logger: Optional[logging.Logger] = None) -> None:
"""
Communicate a list of tensors
Args:
tensors (Iterable[Tensor]): list of tensors
communication_op: a method or partial object which takes a tensor as
input and communicates it. It can be a partial object around
something like torch.distributed.all_reduce
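    Example (hypothetical; assumes torch.distributed is already initialized, and
        ``tensors_to_average`` is a placeholder for the caller's list of tensors):
        >>> import functools
        >>> op = functools.partial(dist.all_reduce, group=dist.group.WORLD)
        >>> communicate(tensors_to_average, op)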
"""
tensors_by_dtype = group_by_dtype(tensors)
for tensors_with_same_dtype in tensors_by_dtype.values():
flat_tensor = flatten_tensors(tensors_with_same_dtype)
if logger is not None:
logger.debug("Flatten completed")
communication_op(tensor=flat_tensor)
if logger is not None:
logger.debug("Commmunication completed")
with torch.no_grad():
for f, t in zip(
unflatten_tensors(flat_tensor, tensors_with_same_dtype),
tensors_with_same_dtype,
):
t.copy_(f)
if logger is not None:
logger.debug("Unflatten completed")
HANDLER_AND_LEVEL_SET: Set[logging.Logger] = set()
# TODO: deprecate this function
def make_logger(rank: int, verbose: bool = True) -> logging.Logger:
"""
Return a logger for writing to stdout
Args:
rank (int): rank of node making logger
        verbose (bool): whether to set log-level to DEBUG; o.w. INFO
Returns:
Python logger
"""
logger = logging.getLogger(__name__)
if logger not in HANDLER_AND_LEVEL_SET:
# if not getattr(logger, "handler_and_level_set", None):
console = logging.StreamHandler(stream=sys.stdout)
format_str = "{}".format(rank)
format_str += ": %(levelname)s -- %(threadName)s -- %(message)s"
console.setFormatter(logging.Formatter(format_str))
logger.addHandler(console) # prints to console
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
HANDLER_AND_LEVEL_SET.add(logger)
# logger.handler_and_level_set = True
return logger
def create_process_group(ranks: List[int]) -> torch.distributed.ProcessGroup:
"""
    Creates and initializes a new process group. Assumes init_process_group
has already been called
Arguments:
ranks (list<int>): ranks corresponding to the processes which should
belong the created process group
Returns:
New process group
"""
new_group = dist.new_group(ranks=ranks)
init_tensor_fp32, init_tensor_fp16 = torch.zeros(1), torch.zeros(1).half()
for init_tensor in [init_tensor_fp32, init_tensor_fp16]:
if torch.cuda.is_available():
init_tensor = init_tensor.cuda()
if dist.get_rank() in ranks:
dist.all_reduce(init_tensor, group=new_group)
torch.cuda.synchronize()
return new_group
class MultiProcessAdapter(logging.LoggerAdapter):
"""
Creates an adapter to make logging for multiple processes cleaner
"""
def process(self, msg: str, kwargs: Any) -> Tuple[str, MutableMapping[str, Any]]:
# use process_num from kwargs or the default given on instantiation
process_num = kwargs.pop("process_num", self.extra["process_num"])
return f"process: {process_num} {msg}", kwargs
|
import inspect
import operator
from typing import Dict, List, Optional, Tuple, cast
from torch.distributed.nn import RemoteModule
import torch.fx
import torch.nn as nn
from . import PipelineModulesGraph
class RemoteModuleTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
if isinstance(m, RemoteModule):
return True
return False
class GraphCreator:
def __init__(self, tracer: RemoteModuleTracer) -> None:
self.tracer = tracer
def get_module(self, node: torch.fx.Node) -> Optional[nn.Module]:
"""Given a call_module node, returns the module corresponding to this module call"""
if node.op != "call_module":
return None
module = self.tracer.root
for t in cast(str, node.target).split("."):
module = getattr(module, t)
return module
def create_graph(self) -> PipelineModulesGraph:
# node_to_data maps nodes to the data they represent
node_to_data: Dict[torch.fx.Node, PipelineModulesGraph.DataSourceSpec] = {}
remote_module_nodes: List[Tuple[torch.fx.Node, RemoteModule]] = []
for node in self.tracer.graph.nodes:
if node.op == "call_module":
module = self.get_module(node)
assert isinstance(module, RemoteModule)
node_to_data[node] = module
remote_module_nodes.append((node, module))
elif node.target == operator.__getitem__ and node.op == "call_function":
assert node.args[0] in node_to_data
d = node_to_data[node.args[0]]
assert isinstance(d, RemoteModule)
node_to_data[node] = (d, node.args[1])
elif node.op == "placeholder":
arg_names = list(inspect.signature(self.tracer.root.forward).parameters)
node_to_data[node] = arg_names.index(node.target)
elif node.op == "output":
pass
else:
assert False, "Invalid node %s" % node
        # The following dict stores the cardinality of the output for each module: it stores None if the
        # output is a simple tensor, and the number of tensors in the output if it is a tuple.
module_to_num_outputs: Dict[nn.Module, Optional[int]] = {}
for node, _ in remote_module_nodes:
# iterate over inputs to the module.
for arg in node.args:
data = node_to_data[arg]
if isinstance(data, int):
continue
if isinstance(data, RemoteModule):
assert module_to_num_outputs.get(data, None) is None
module_to_num_outputs[data] = None
else:
module, output_num = data
# Here we discovered that the output number "output_num" is used,
# so the number of outputs should be at least "output_num+1".
if module in module_to_num_outputs:
prev_value = module_to_num_outputs[module]
assert prev_value is not None
module_to_num_outputs[module] = max(prev_value, output_num + 1)
else:
module_to_num_outputs[module] = output_num + 1
graph = PipelineModulesGraph()
for node, module in remote_module_nodes:
inputs = [node_to_data[arg] for arg in node.args]
graph.add_layer(module, inputs, module_to_num_outputs.get(module))
return graph
def _call_trace(tracer: RemoteModuleTracer, module: nn.Module) -> torch.fx.Graph:
try:
org_named_modules = RemoteModule.named_modules
org_named_children = RemoteModule.named_children
RemoteModule.named_modules = nn.Module.named_modules # type: ignore
RemoteModule.named_children = nn.Module.named_children # type: ignore
return tracer.trace(module)
finally:
RemoteModule.named_modules = org_named_modules # type: ignore
RemoteModule.named_children = org_named_children # type: ignore
def make_graph(module: nn.Module) -> PipelineModulesGraph:
"""
    Creates a PipelineModulesGraph for the module. The module should be traceable by torch.fx.
    Also, all operations on tensors should be performed by RemoteModules.
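
    Example (hypothetical; ``MyModel`` stands for a user-defined nn.Module whose tensor
    operations are all performed by RemoteModule children, with RPC already initialized):
        >>> graph = make_graph(MyModel())
        >>> partitions = graph.partition_graph()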
"""
tracer = RemoteModuleTracer()
r = _call_trace(tracer, module)
g = torch.fx.GraphModule(module, r)
return GraphCreator(tracer).create_graph()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import List, Optional, Set, Tuple, Union
from torch import Tensor, nn
from torch.distributed import rpc
from torch.distributed.nn import RemoteModule
from .data import DataConsumer
class MultiInputSequential(nn.Module):
"""A variation of nn.Sequential, that allows the first module in the sequence accepts
multiple inputs. To be used internally by _split_module
"""
def __init__(self, *modules: nn.Module) -> None:
super().__init__()
self.modules_list = nn.ModuleList(modules)
def forward(self, *inputs: Tuple[Tensor]) -> Tensor: # type: ignore
input = self.modules_list[0](*inputs)
for module in self.modules_list[1:]:
input = module(input)
return input
def RemoteSequential(rref_list: List[rpc.RRef]) -> MultiInputSequential:
return MultiInputSequential(*(r.local_value() for r in rref_list))
NodeDataConsumer = DataConsumer["Node"]
@dataclass
class DataSource:
# If producer is None, we are referring to the model input
producer: Optional["Node"]
# indicating which output of the producer module, or which input of the model if producer is None.
output_idx: int
class Node:
def __init__(self, module: RemoteModule):
self.module = module
self.num_outputs: Optional[int] = None
self.inputs: List[DataSource] = []
# To be compiled by _compile method
self.output_consumers: List[NodeDataConsumer] = []
class PipelineModulesGraph(nn.Module):
"""A collection of remote modules (of type RemoteModule) with connections showing how inputs
    to the model or outputs of individual modules are used as inputs of subsequent modules.
The graph has a number of helper functions that add new modules to the graph and define inputs
    to these modules.
"""
def __init__(self) -> None:
super().__init__()
self.nodes: List[Node] = []
def _find_node(self, module: RemoteModule) -> Node:
for n in self.nodes:
if n.module is module:
return n
raise ValueError
def _find_or_add(self, module: RemoteModule) -> Node:
try:
return self._find_node(module)
except ValueError:
new_node = Node(module)
self.nodes.append(new_node)
return new_node
# DataSourceSpec lists choices the user has for specifying the source of each input to a module:
# -- If the input is one of model inputs, it is specified by a simple integer, which is the index of that input
# -- If the input comes from a module with a simple output, it is specified by that module
# -- If the input comes from a module with multiple outputs (a tuple), it is specified by that module and the
# index of the output
DataSourceSpec = Union[int, RemoteModule, Tuple[RemoteModule, int]]
def _data_source_spec_to_data_source(self, spec: DataSourceSpec) -> DataSource:
if isinstance(spec, int):
return DataSource(None, spec)
if isinstance(spec, RemoteModule):
return DataSource(self._find_node(spec), 0)
return DataSource(self._find_node(spec[0]), spec[1])
def add_layer(self, module: RemoteModule, inputs: List[DataSourceSpec], num_outputs: Optional[int] = None) -> None:
"""Adds a module with specified inputs to the graph. The modules that provide inputs to this module must have
been added previously to the graph and are listed with argument inputs. If the module output is a tuple,
num_outputs specifies the number of elements in the tuple.
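
        Example (hypothetical; ``m1`` and ``m2`` are RemoteModules already added to the graph, and
        the new module ``m3`` consumes the output of ``m1`` and the second output of ``m2``):
            >>> graph.add_layer(m3, [m1, (m2, 1)])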
"""
node = Node(module)
node.inputs = [self._data_source_spec_to_data_source(spec) for spec in inputs]
node.num_outputs = num_outputs
self.nodes.append(node)
def add_sequence(
self,
modules: List[RemoteModule],
first_module_inputs: List[DataSourceSpec],
last_module_num_outputs: Optional[int] = None,
) -> None:
"""Adds a list of modules to the graph, to be run sequentially.
The connection between these modules is as follows: the output of each of these modules
(except the last one) is used as the input of its next module in this sequence.
So all modules (except the last one) must have simple output, and also all of them (except the first one)
should have a single input.
The user also specifies the input to the first module in this sequence with argument 'first_module_inputs'.
In case the last module output is a tuple, 'last_module_num_outputs' specifies the number of elements
in the tuple.
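
        Example (hypothetical; ``layer1``..``layer3`` are RemoteModules created by the user, and
        the first layer reads model input 0):
            >>> graph.add_sequence([layer1, layer2, layer3], first_module_inputs=[0])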
"""
next_input = first_module_inputs
for i, module in enumerate(modules):
self.add_layer(module, next_input, last_module_num_outputs if i == len(modules) - 1 else None)
next_input = [module]
def _compile(self) -> None:
"""Precomputes self.model_input_consumers and self.output_consumers for internal use by the pipleine
class. These two lists show consumers of inputs to the model, and outputs of each module of
the graph. Each consumer is a pair (i, j) which stands for the j'th input to the i'th module
in the graph.
"""
# TODO: We need to make sure following conditions hold before preparing the graph for the pipeline:
        # * the graph has at least one module, and is connected.
        # * num_inputs and num_outputs for modules match the list of connections defined in the graph.
        # * all inputs to a module should come from model input, or modules with smaller index in
        #   the graph. This condition is used in implementation of DistributedPipeline.forward. Even
# if we relax this condition, still need to make sure the graph is acyclic.
m = len(self.nodes)
self.model_input_consumers = []
for node in self.nodes:
for input_index, input_item in enumerate(node.inputs):
data_consumer = NodeDataConsumer(node, input_index, input_item.output_idx)
if input_item.producer is not None:
input_item.producer.output_consumers.append(data_consumer)
else:
self.model_input_consumers.append(data_consumer)
def _trace_modules(self, node: Node) -> List[Node]:
"""Compiles a list of modules (starting from module number module_idx), where each module in the list
gets the output of previous module in the list as its input. So every module in the list, except the
first one should have only one input, and similarly, every module in the list, except the last one
should have only one output.
"""
partition = []
current_node = node
while True:
partition.append(current_node)
# If we reached a module with multiple outputs or with multiple consumers for its output,
# stop adding more modules to the partition.
if len(current_node.output_consumers) != 1:
break
if current_node.num_outputs is not None:
break
            # Next module to add is the only consumer of the output of the current module
next_node = current_node.output_consumers[0].consumer
# If the next module has multiple inputs, do not add it to the current partition and stop.
if next_node.inputs != [DataSource(current_node, 0)]:
break
            # If the next module is on a different device or worker, stop
if next_node.module.on != current_node.module.on:
break
if next_node.module.device != current_node.module.device:
break
current_node = next_node
return partition
def partition_graph(self) -> List[Tuple[List[Node], rpc.RRef]]:
"""Splits the graph into pipeline partitions and for each parition returns a tuple (indices, module_rref),
where indices is indices of modules of the partition in the graph, and module_rref is an RRef to an nn.Module:
Each partition is a list of modules on the same device that are executed sequentially (output of each module is
the input to the next module).
If there is only one module in the partition, module_rref is reference to that module; otherwise those modules
        are wrapped by a MultiInputSequential and module_rref refers to that.
"""
self._compile()
modules_used: Set[Node] = set()
partitions = []
for node in self.nodes:
if node in modules_used:
continue
partition = self._trace_modules(node)
assert not modules_used.intersection(partition)
modules_used.update(partition)
if len(partition) == 1:
remote_module = partition[0].module.get_module_rref()
else:
remote_module = rpc.remote(
partition[0].module.on,
RemoteSequential,
args=([p.module.get_module_rref() for p in partition],),
)
partitions.append((partition, remote_module))
return partitions
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .graph import PipelineModulesGraph
from .loss import DistributedLoss
from .pipeline import DistributedPipeline
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from threading import Condition
from types import TracebackType
from typing import Dict, List, Optional, Tuple, Type, Union, cast
import torch
from torch import Tensor
from torch.autograd.profiler import record_function
from torch.distributed import rpc
from fairscale.nn.pipe import microbatch
from fairscale.nn.pipe.checkpoint import Checkpointing, TensorOrTensors
from fairscale.nn.pipe.dependency import fork, join
from fairscale.nn.pipe.microbatch import Batch
from fairscale.nn.pipe.stream import as_cuda, current_stream, is_cuda, use_device, use_stream
from fairscale.nn.pipe.worker import Task, create_workers
from .data import DataConsumer
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
class DistributedPipelineRecord:
"""A class for storing a single mini-batch (consisting of multiple micro-batches) as input to
a single partition.
Args:
device: the local device that runs the partition.
rank: the rank of the partition in the pipeline.
chunks: number of micro-batches in a mini-batch
num_inputs: number of inputs to the partition.
consumers: list of consumers of outputs of the partition. Each consumer in the list is a tuple
(remote_partition_rref, input_idx, output_idx) where remote_partition_rref points to a
remote DistributedPipelineRecord for the consumer partition for this mini-batch. The output number
output_idx of this partition will be used as the input number input_idx of that partition.
"""
# Need to use Union due to https://github.com/python/mypy/issues/7866
DataConsumer = Union[DataConsumer[rpc.RRef]]
def __init__(
self,
device: torch.device,
rank: int,
chunks: int,
num_inputs: int,
num_outputs: Optional[int],
consumers: List[DataConsumer],
) -> None:
self.ready_cv = Condition()
# Each chunk consists of num_inputs tensors. self.tensors stores these individual tensors.
self.tensors: List[List[Optional[Tensor]]] = [[None] * num_inputs for _ in range(chunks)]
# For each tensor in self.tensors, we record a cuda event in the corresponding tensorpipe stream in self.recv_events,
# and later the stream that processes that tensor will wait on that event.
self.recv_events = [[None] * num_inputs for _ in range(chunks)]
# Once all num_inputs tensors of a given chunk are received, they are assembled as a batch and stored in
# self.batches
self.batches: List[Optional[Batch]] = [None] * chunks
# For each tensor of each chunk, we fork a phony tensor, which will be used for injecting dependency between
# different chunks in the backward pass.
if num_outputs is None:
num_outputs = 1
self.forwarded_phony: List[List[List[rpc.RRef]]] = [[[] for j in range(num_outputs)] for i in range(chunks)]
self.consumers = consumers
self.rank = rank
self.device = device
def __getstate__(self) -> Dict:
# avoid pickling failure.
return {}
def feed(self, chunk: int, input_idx: int, input: Tensor) -> Tensor:
"""This function is called remotely to provide individual tensors of a given chunk."""
if input.device.type == "cpu":
input = input.to(self.device)
cuda_stream = torch.cuda.current_stream(input.device) if input.device.type == "cuda" else None
with self.ready_cv:
assert self.tensors[chunk][input_idx] is None
input, phony = fork(input)
self.recv_events[chunk][input_idx] = (
cuda_stream.record_event() if cuda_stream is not None else None # type: ignore
)
self.tensors[chunk][input_idx] = input
self.ready_cv.notify_all()
return phony
def wait_for(self, chunk: int) -> None:
"""Waits until all elements of the given chunk are populated in self.tensors.
Then it constructs self.batches[chunk] if it is not constructed yet.
"""
with self.ready_cv:
while self.batches[chunk] is None and any(b is None for b in self.tensors[chunk]):
self.ready_cv.wait()
if self.batches[chunk] is None:
tensors = cast(List[Tensor], self.tensors[chunk])
self.batches[chunk] = Batch(tuple(tensors), chunk)
def fence(self, chunk: int) -> None:
"""Prepares micro-batches for computation."""
# Ensure that batches[chunk-1] is executed after batches[chunk] in
# backpropagation by an explicit dependency.
# TODO: This dependency injection causes deadlock if this partition
# gets its input from model input. 1) Figure out why 2) If we need to live
# with this constraint, replace the condition 'self.rank > 0' below with
# a more accurate one.
if chunk != 0 and self.consumers and self.rank > 0:
batch = self.batches[chunk]
assert batch is not None
dependant_tensors = list(batch.tensors)
for remote_ph_list in self.forwarded_phony[chunk - 1]:
for remote_ph in remote_ph_list:
phony = remote_ph.to_here()
dependant_tensors[0] = join(dependant_tensors[0], phony)
self.batches[chunk] = Batch(tuple(dependant_tensors), chunk)
def sync_stream(self, chunk: int, stream: torch.cuda.Stream) -> None:
"""Syncs the stream with the cuda events associated with transmission of the chunk to the cuda device."""
for e in self.recv_events[chunk]:
if e is not None:
stream.wait_event(e)
def forward_results(self, chunk: int) -> None:
"""Forwards the outputs of processing the chunk in this partition for processing by the consumer partitions."""
for consumer in self.consumers:
v = self.get_batch(chunk).value[consumer.output_idx]
self.forwarded_phony[chunk][consumer.output_idx].append(
consumer.consumer.remote().feed(chunk, consumer.consumer_input_idx, v)
)
def get_batch(self, chunk: int) -> Batch:
batch = self.batches[chunk]
assert batch is not None
return batch
class PartitionHandler:
"""This class processes a single partition of the pipeline.
Args:
module_rref: RRef to the nn.Module for this partition. It should be on the local rpc worker.
device: The device that holds the module.
num_inputs: Number of inputs to the module
num_outputs: Number of outputs of the module. If the module output is not a tuple (and it is a
single tensor), num_outputs should be None.
rank: The rank of the partition
chunks: Number of micro-batches in a mini-batch
checkpoint_stop: Checkpointing is done only for the first checkpoint_stop chunks of a mini-batch.
"""
def __init__(
self,
module_rref: rpc.RRef,
device: str,
num_inputs: int,
num_outputs: Optional[int],
rank: int,
chunks: int,
checkpoint_stop: int,
) -> None:
self.module = module_rref.local_value()
self.chunks = chunks
self.device = torch.device(device)
self.checkpoint_stop = checkpoint_stop
self.rank = rank
self.num_inputs = num_inputs
self.num_outputs = num_outputs
(self.in_queue,), (self.out_queue,) = create_workers([self.device])
def __getstate__(self) -> Dict:
# avoid pickling failure.
return {}
def local_parameter_rrefs(self) -> List[rpc.RRef]:
r"""
Create one RRef for each parameter in the given local module, and return a
list of RRefs.
"""
return [rpc.RRef(p) for p in self.module.parameters()]
def make_pipeline_record(self, consumers: List[DataConsumer]) -> DistributedPipelineRecord:
return DistributedPipelineRecord(
self.device, self.rank, self.chunks, self.num_inputs, self.num_outputs, consumers
)
def run(self, pipeline_record: DistributedPipelineRecord) -> None:
"""Runs pipeline parallelism. It modifies the given batches in place."""
m = len(pipeline_record.batches)
self.stream = current_stream(self.device)
for chunk in range(m):
with record_function("feed"):
pipeline_record.wait_for(chunk)
pipeline_record.fence(chunk)
self.compute(pipeline_record, chunk)
with use_stream(self.stream):
pipeline_record.forward_results(chunk)
def compute(self, pipeline_record: DistributedPipelineRecord, chunk: int) -> None:
"""Runs tasks with synchronization to tensor-pipe streams."""
checkpoint_stop = self.checkpoint_stop
# Disable checkpointing if in eval mode.
if not self.module.training:
checkpoint_stop = 0
exc_info: Optional[ExcInfo] = None
batch = pipeline_record.get_batch(chunk)
if is_cuda(self.stream):
pipeline_record.sync_stream(chunk, as_cuda(self.stream))
# Determine whether to use checkpointing for this chunk or not.
checkpoint = chunk < checkpoint_stop
if checkpoint:
def function(input: TensorOrTensors, chunk_id: int = chunk) -> TensorOrTensors:
with record_function("chunk%d-rank%d" % (chunk_id, pipeline_record.rank)):
result = self.module(*input)
if self.num_outputs is None:
result = (result,)
return tuple(result)
chk = Checkpointing(function, batch)
task = Task(self.stream, compute=chk.checkpoint, finalize=chk.recompute)
del function, chk
else:
def compute(
batch: Batch = batch,
chunk_id: int = chunk,
rank: int = pipeline_record.rank if pipeline_record is not None else -1,
) -> Batch:
with record_function("chunk%d-rank%d" % (chunk_id, pipeline_record.rank)):
result = self.module(*batch.tensors)
if self.num_outputs is None:
result = (result,)
return Batch(result, chunk_id)
task = Task(self.stream, compute=compute, finalize=None)
del compute
self.in_queue.put(task)
ok, payload = self.out_queue.get()
# Hold the first exception.
if exc_info is not None:
pass
elif not ok:
exc_info = cast(ExcInfo, payload)
else:
task, batch = cast(Tuple[Task, Batch], payload)
with use_device(self.device):
task.finalize(batch)
pipeline_record.batches[chunk] = batch
if exc_info is not None:
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
def run_pipeline(self, pipeline_record_rref: rpc.RRef) -> Optional[Tensor]:
"""Processes a mini-batch on this partition.
If this is the last partition (pipeline_record has no consumer), concatenates results of processing
all chunks and returns the result as the output of the model on the whole mini-batch.
"""
pipeline_record = pipeline_record_rref.local_value()
self.run(pipeline_record)
result: Optional[Tensor] = None
if not pipeline_record.consumers:
gather_result = microbatch.gather(pipeline_record.batches)
assert len(gather_result) == 1
result = gather_result[0]
s0 = current_stream(result.device)
if is_cuda(s0):
# TODO. Investigate why this is needed and remove it if possible.
as_cuda(s0).synchronize()
# TODO: There seems to be a memory leak that is solved by following line.
# Investigate why is it needed.
del pipeline_record.batches
return result
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Dict, Tuple
from torch import nn
from torch.distributed import rpc
def _rloss(loss_func: Callable, input_rref: rpc.RRef, target_rref: rpc.RRef) -> rpc.RRef:
return loss_func(input_rref.to_here(), target_rref.to_here())
def DistributedLoss(loss: nn.Module, *args: Tuple, **kwargs: Dict) -> Callable:
loss_func = loss(*args, **kwargs)
def dloss(input_rref: rpc.RRef, target_rref: rpc.RRef) -> rpc.RRef:
return rpc.remote(input_rref.owner(), _rloss, args=(loss_func, input_rref, target_rref))
return dloss
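# A minimal single-process usage sketch (editor's addition, hedged): the worker name,
# address, and port below are illustrative assumptions, not part of this module's API,
# and a CPU-only, world_size=1 RPC setup is assumed.
if __name__ == "__main__":
    import os

    import torch

    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29501")
    rpc.init_rpc("worker0", rank=0, world_size=1)
    # DistributedLoss wraps a loss class so it can be applied to RRefs instead of tensors.
    loss_fn = DistributedLoss(nn.MSELoss)
    input_rref = rpc.RRef(torch.randn(4, 3))
    target_rref = rpc.RRef(torch.zeros(4, 3))
    # The call returns an RRef owned by the worker that owns the input.
    print(loss_fn(input_rref, target_rref).to_here())
    rpc.shutdown()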
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import torch
from torch import Tensor, nn
from torch.distributed import rpc
from fairscale.internal import torch_version
from fairscale.nn.pipe import microbatch
from .data import DataConsumer
from .graph import Node, PipelineModulesGraph
from .partition_handler import DistributedPipelineRecord, PartitionHandler
Device = Union[torch.device, int, str]
def check_pytorch_version() -> None:
if torch_version() < (1, 8, 0):
raise Exception("DistributedPipeline requires PyTorch version 1.8 or higher")
MOVING_DENIED = TypeError(
"denied to move parameters and buffers, " "because DistributedPipeline should manage device placement"
)
class DistributedPipeline(nn.Module):
"""Wraps a :class:`PipelineModulesGraph` model to train on using synchronous pipeline
parallelism. If the model requires lots of memory and doesn't fit on a single GPU,
pipeline parallelism is a useful technique to employ for training.
The implementation is based on the torchgpipe_ paper.
.. _torchgpipe: https://arxiv.org/abs/2004.09910
DistributedPipeline combines pipeline parallelism with checkpointing to reduce peak
memory required to train while minimizing device under-utilization.
You should place all the modules on the appropriate rpc workers and devices and wrap
them into a :class:`PipelineModulesGraph` module defining the connections between them.
Args:
module (:class:`PipelineModulesGraph`):
model to be parallelized using pipelining. Each module
in the graph has to have all of its parameters on a single
device.
chunks (int):
number of micro-batches (default: ``1``)
checkpoint (str):
when to enable checkpointing, one of ``'always'``,
``'except_last'``, or ``'never'`` (default: ``'except_last'``).
``'never'`` disables checkpointing completely, ``'except_last'``
enables checkpointing for all micro-batches except the last one
and ``'always'`` enables checkpointing for all micro-batches.
"""
@dataclass
class Partition:
nodes: List[Node]
handler: rpc.RRef
def __hash__(self) -> int:
return hash(self.handler)
DataConsumer = DataConsumer[Partition]
def __init__(
self,
graph: PipelineModulesGraph,
chunks: int = 1,
checkpoint: str = "except_last",
) -> None:
super().__init__()
check_pytorch_version()
chunks = int(chunks)
checkpoint = str(checkpoint)
if chunks <= 0:
raise ValueError("number of chunks must be positive integer")
if checkpoint not in ["always", "except_last", "never"]:
raise ValueError("checkpoint is not one of 'always', 'except_last', or 'never'")
self.chunks = chunks
# The micro-batch index where the checkpointing stops.
checkpoint_stop = {"always": self.chunks, "except_last": self.chunks - 1, "never": 0}[checkpoint]
self.partitions = [
self.Partition(
nodes,
rpc.remote(
handler.owner(),
PartitionHandler,
args=(
handler,
nodes[0].module.device,
len(nodes[0].inputs),
nodes[-1].num_outputs,
i,
self.chunks,
checkpoint_stop,
),
),
)
for i, (nodes, handler) in enumerate(graph.partition_graph())
]
self.input_consumers = [
next(
self.DataConsumer(partition, input_consumer.consumer_input_idx, input_consumer.output_idx)
for partition in self.partitions
if partition.nodes[0] is input_consumer.consumer
)
for input_consumer in graph.model_input_consumers
]
self.graph = graph
# DistributedPipeline should manage the device of each partition.
# Deny cuda(), cpu(), and to() with device, by TypeError.
def cuda(self, device: Optional[Device] = None) -> "DistributedPipeline":
raise MOVING_DENIED
def cpu(self) -> "DistributedPipeline":
raise MOVING_DENIED
def to(self, *args: Any, **kwargs: Any) -> "DistributedPipeline":
# Deny these usages:
#
# - to(device[, dtype, non_blocking])
# - to(tensor[, non_blocking])
#
# But allow this:
#
# - to(dtype[, non_blocking])
#
if "device" in kwargs or "tensor" in kwargs:
raise MOVING_DENIED
if args:
if isinstance(args[0], (torch.device, int, str)):
raise MOVING_DENIED
if torch.is_tensor(args[0]):
raise MOVING_DENIED
return super().to(*args, **kwargs)
def parameter_rrefs(self) -> List[rpc.RRef]:
remote_params = []
for p in self.partitions:
remote_params.extend(p.handler.rpc_sync().local_parameter_rrefs())
return remote_params
def forward(self, *inputs: Tensor) -> rpc.RRef: # type: ignore
for i, input in enumerate(inputs):
microbatch.check(input)
# Divide a mini-batch into micro-batches.
batches_list = [microbatch.scatter(input, self.chunks) for input in inputs]
# Create a DistributedPipelineRecord, one per partition, and make connections between them (i.e.
# set list of consumers).
pipeline_records: Dict[DistributedPipeline.Partition, rpc.RRef] = {}
for partition in reversed(self.partitions):
r_handler = partition.handler.remote()
consumers = []
# Identify consumers of the outputs of the partition
for consumer in partition.nodes[-1].output_consumers:
consumer_partition = next(p for p in self.partitions if p.nodes[0] is consumer.consumer)
# Index of a consumer partition should be greater than index of the partition.
assert consumer_partition in pipeline_records
consumers.append(
DistributedPipelineRecord.DataConsumer(
pipeline_records[consumer_partition], consumer.consumer_input_idx, consumer.output_idx
)
)
pipeline_records[partition] = r_handler.make_pipeline_record(consumers)
# Let the pipeline-handler for the partition start processing the pipeline-record for that partition.
this_result = r_handler.run_pipeline(pipeline_records[partition])
# If this is the last partition, we expect the result of the model to be the output of this partition.
if partition is self.partitions[-1]:
result = this_result
# Start feeding model input to the partitions that need them.
for i, b in enumerate(zip(*batches_list)):
for input_consumer in self.input_consumers:
pipeline_record = pipeline_records[input_consumer.consumer]
# TODO: Debug why we need this special handling
if pipeline_record.owner().name == rpc.get_worker_info().name: # type: ignore
pipeline_record.local_value().feed(
i, input_consumer.consumer_input_idx, b[input_consumer.output_idx].value
)
else:
pipeline_record.rpc_async().feed(i, input_consumer.consumer_input_idx, b[input_consumer.output_idx].value) # type: ignore
return result
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from typing import Generic, TypeVar
ConsumerType = TypeVar("ConsumerType")
@dataclass
class DataConsumer(Generic[ConsumerType]):
"""A data class for representing a consumer of an output of a module."""
consumer: ConsumerType
consumer_input_idx: int # indicating which input of the consumer module
output_idx: int # indicating which output of the producer module
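# Hedged illustration (editor's addition): a DataConsumer only records that output number
# `output_idx` of some producer feeds input number `consumer_input_idx` of `consumer`.
# The string "module_b" is a stand-in for an arbitrary consumer type.
if __name__ == "__main__":
    edge = DataConsumer[str](consumer="module_b", consumer_input_idx=0, output_idx=1)
    print(edge)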
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""
To prevent underflow or overflow of gradients, DynamicLossScaler is used to
dynamically scale up and down gradients by scaling the loss. The usage of the
DynamicLossScaler is similar to that of GradScaler, except that DynamicLossScaler
can be used for updates on a CPU device.
https://pytorch.org/docs/stable/_modules/torch/cuda/amp/grad_scaler.html#GradScaler
"""
from collections import defaultdict
from enum import Enum
from typing import Dict, List, Optional
import torch
class OptState(Enum):
READY = 0
UNSCALED = 1
STEPPED = 2
def _refresh_per_optimizer_state() -> OptState:
return OptState.READY
class DynamicLossScaler(object):
"""An instance ``scaler`` helps perform the steps of gradient scaling
conveniently.
"""
def __init__(
self,
init_scale: float = 2.0**15,
scale_factor: float = 2.0,
scale_window: int = 2000,
tolerance: float = 0.0,
threshold: Optional[float] = None,
min_loss_scale: float = 1e-4,
):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self.min_loss_scale = min_loss_scale
self._iter = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
self._per_optimizer_states: Dict[int, OptState] = defaultdict(_refresh_per_optimizer_state)
self._scale = None
def scale(self, outputs): # type: ignore
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs.
Args:
outputs (Tensor or iterable of Tensors): Outputs to scale.
Returns:
Tensor or iterable of Tensors: Scaled outputs.
"""
return self.loss_scale * outputs
@torch.no_grad()
def _get_gradients_norm(self, params: List[torch.nn.Parameter]) -> float:
grads = []
for p in params:
if p.grad is None:
continue
else:
grads.append(p.grad.detach())
if len(grads) == 0:
return 0.0
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32) # type: ignore
else:
total_norm = torch.norm(torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])) # type: ignore
return total_norm.item()
def _decrease_loss_scale(self) -> None:
self.loss_scale /= self.scale_factor
if self.threshold is not None:
self.loss_scale = max(self.loss_scale, self.threshold)
def _check_overflow(self, grad_norm: float) -> None:
# detect inf and nan
if grad_norm == float("inf") or grad_norm != grad_norm:
# overflow has occurred
prev_scale = self.loss_scale
iter_since_rescale = self._iter - self._last_rescale_iter
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
if pct_overflow >= self.tolerance:
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
if self.loss_scale <= self.min_loss_scale:
# Use FloatingPointError as an uncommon error that parent
# functions can safely catch to stop training.
self.loss_scale = prev_scale
raise FloatingPointError(
(
"Minimum loss scale reached ({}). Your loss is probably exploding. "
"Try lowering the learning rate, using gradient clipping or "
"increasing the batch size."
).format(self.min_loss_scale)
)
self._iter += 1
raise OverflowError("setting loss scale to: " + str(self.loss_scale))
def update(self) -> None:
"""Updates the scale factor."""
if (self._iter - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def step(self, optimizer, *args, **kwargs): # type: ignore
"""
:meth:`step` unscales the gradients and steps the optimizer.
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
Args:
optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
args: Any arguments.
kwargs: Any keyword arguments.
Returns:
The return value of ``optimizer.step(*args, **kwargs)``. None when overflow or underflow
gradients occur and optimizer.step() is skipped.
"""
if "closure" in kwargs:
raise RuntimeError("Closure use is not currently supported if DynamicLossScaler is enabled.")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state is OptState.STEPPED:
raise RuntimeError("step() has already been called since the last update().")
# check gradient norm. If gradient norm is nan or inf, adjust scale here, and skip step.
# clip_grads_norm can happen before this step
for group in optimizer.param_groups:
grad_norm = self._get_gradients_norm(group["params"])
try:
self._check_overflow(grad_norm)
except OverflowError:
return None
if optimizer_state is OptState.READY:
self.unscale_(optimizer)
state_dict = optimizer.state_dict()
state_dict["loss_scale"] = self.loss_scale
retval = optimizer.step(*args, **kwargs)
self._per_optimizer_states[id(optimizer)] = OptState.STEPPED
return retval
def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
# unscale the gradients.
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state is OptState.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
assert self.loss_scale is not None
inv_scale = 1.0 / float(self.loss_scale)
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is None:
continue
else:
param.grad.data.mul_(inv_scale)
self._per_optimizer_states[id(optimizer)] = OptState.UNSCALED
def state_dict(self) -> Optional[Dict[str, float]]:
if self.loss_scale is not None:
return {"loss_scale": self.loss_scale}
else:
return None
def load_state_dict(self, state_dict: Dict[str, float]) -> None:
if "loss_scale" in state_dict:
self.loss_scale = state_dict["loss_scale"]
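# A minimal CPU training-loop sketch (editor's addition, hedged): the model, optimizer,
# and random data below are illustrative stand-ins; only DynamicLossScaler from this
# module is assumed.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    scaler = DynamicLossScaler(init_scale=2.0**8)
    for _ in range(3):
        opt.zero_grad()
        loss = model(torch.randn(8, 4)).pow(2).mean()
        # Scale the loss, backprop scaled gradients; step() unscales and steps the optimizer.
        scaler.scale(loss).backward()
        scaler.step(opt)
        scaler.update()
    print("final loss scale:", scaler.state_dict())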
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from .dynamic_loss_scaler import DynamicLossScaler
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional, Tuple
import torch
from torch import Tensor
# Helper Functions
def _get_k_for_topk(topk_percent: Optional[float], top_k_element: Optional[int], top_k_total_size: int) -> int:
"""Converts the top_k_percent to top_k_element when top_k_percent is provided
as the criterion for top-k calculation. When top_k_element is used as the criterion,
simply returns the value for k. Also, ensures k is never 0 to avoid all-zero tensors.
"""
if top_k_element is None:
top_k_element = round(top_k_total_size * topk_percent / 100.0)
elif top_k_element > top_k_total_size:
raise ValueError("top_k_element for sst or dst is larger than max number of elements along top_k_dim")
# ensure we never have 100% sparsity in tensor and always have 1 surviving element!
return max(1, top_k_element)
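# Editor's hedged illustration: with topk_percent=25.0 and top_k_total_size=16 this returns
# round(16 * 25 / 100) = 4; a top_k_element of 0 would be clamped up to 1 so the resulting
# tensor is never entirely zero.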
def _scatter_topk_to_sparse_tensor(
top_k_tensor: Tensor, to_be_sparsify_tensor: Tensor, k: int, dim: Optional[int]
) -> Tensor:
"""Scatter the topk values of the to_be_sparsify_tensor to a zero tensor of the same shape
at the top-k indices of the top_k_tensor. This function allows top-k computation with a
derived tensor from to_be_sparsify_tensor.
Args:
top_k_tensor (Tensor):
The source tensor whose top-k "indices" are taken and used to extract
the corresponding "values" from the to_be_sparsify_tensor.
to_be_sparsify_tensor (Tensor):
The tensor whose values are gathered according to the top-k indices
of the top_k_tensor; a zero tensor of the same shape is populated with these
values at those indices to create the returned sparse tensor.
k (int):
the value of k for top-k
dim (Optional[int]):
dimension for top-k
Returns:
(Tensor):
Returns a sparse_tensor with the same shape as the top_k_tensor and to_be_sparsify_tensor,
and populated with the values of the to_be_sparsify_tensor at the indices corresponding
to the top-k indices of the source tensor.
"""
assert (
top_k_tensor.shape == to_be_sparsify_tensor.shape
), "top_k_tensor and to_be_sparsify_tensor have different shapes!"
sparse_tensor = torch.zeros_like(to_be_sparsify_tensor)
orig_shape = sparse_tensor.shape
if dim is None and len(orig_shape) > 1:
sparse_tensor = sparse_tensor.reshape(-1)
to_be_sparsify_tensor = to_be_sparsify_tensor.reshape(-1)
top_k_tensor = top_k_tensor.reshape(-1)
dim = -1
_, i = top_k_tensor.topk(k, dim=dim)
return sparse_tensor.scatter(dim, i, to_be_sparsify_tensor.gather(dim, i)).reshape(orig_shape)
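# Editor's hedged illustration: with top_k_tensor = tensor([3., 1., 2.]),
# to_be_sparsify_tensor = tensor([10., 20., 30.]), k=2 and dim=-1, the top-2 indices of
# top_k_tensor are 0 and 2, so the result is tensor([10., 0., 30.]).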
def _top_k_total_size(tensor: Tensor, topk_dim: Optional[int]) -> int:
"""Get the total size of the input tensor along the topk_dim dimension. When the
dimension is None, get the number of elements in the tensor.
"""
top_k_total_size = tensor.numel() if topk_dim is None else tensor.shape[topk_dim]
assert top_k_total_size > 0, "Total size of input tensor along the topk_dim has to be greater than 0."
return top_k_total_size
def _is_sparsity_zero(
dense: Tensor, topk_percent: Optional[float], topk_element: Optional[int], top_k_dim: Optional[int]
) -> bool:
"""Returns True when a given value of topk_percent or topk_element along a particular top_k_dim
for an input tensor results in sparsity=0% (or top-100-percent). Otherwise, returns False.
"""
if topk_percent is None and topk_element is None:
return False # 100% sparse
top_k_total_size = _top_k_total_size(dense, top_k_dim)
k = _get_k_for_topk(topk_percent, topk_element, top_k_total_size)
return k == top_k_total_size
def _fft_transform(dense: Tensor, dim: Optional[int]) -> Tensor:
"""Wrapper of torch.fft.fft with more flexibility on dimensions.
TODO (Min): figure out if we need to change other args like frequency length, n, or
the normalization flag.
For our use case, we use fft not rfft since we want big magnitude components from
both positive and negative frequencies.
Args:
dense (Tensor):
Input dense tensor (no zeros).
dim (int):
Which dimension to transform.
Returns:
(Tensor, complex):
transformed dense tensor FFT components.
"""
orig_shape = None
if dim is None:
orig_shape = dense.shape
dense = dense.reshape(-1)
dim = -1
ret = torch.fft.fft(dense, dim=dim)
if orig_shape is not None:
ret = ret.reshape(orig_shape)
return ret
def _ifft_transform(sst: Tensor, dim: Optional[int]) -> Tensor:
"""Wrapper of torch.fft.ifft with more flexibility on dimensions.
Args:
sst (Tensor):
Input sst tensor (may have zeros) in frequency domain.
dim (int):
Which dimension to transform.
Returns:
(Tensor):
A new, transformed dense tensor with real domain values.
"""
assert sst.is_complex()
orig_shape = None
if dim is None:
orig_shape = sst.shape
sst = sst.reshape(-1)
dim = -1
ret = torch.fft.ifft(sst, dim=dim)
if orig_shape is not None:
ret = ret.reshape(orig_shape)
return ret
def _dct_transform(dense: Tensor, dim: int) -> Tensor:
"""Should take a tensor and perform a Discrete Cosine Transform on the tensor.
Args:
dense (Tensor):
Input dense tensor (no zeros).
dim (int):
Which dimension to transform.
Returns:
(Tensor):
transformed dense tensor DCT components
"""
raise NotImplementedError("Support for DCT has not been implemented yet!")
def _idct_transform(sst: Tensor, dim: int) -> Tensor:
"""Should take a tensor and perform an inverse Discrete Cosine Transform and return a new tensor.
Args:
sst (Tensor):
Input sst tensor (may have zeros) in frequency domain.
dim (int):
Which dimension to transform.
Returns:
(Tensor):
A new, transformed dense tensor with real domain values.
"""
raise NotImplementedError("Support for iDCT has not been implemented yet!")
class Algo(Enum):
FFT = 0
DCT = 1
class SignalSparsity:
"""
This class represents a particular config for a set of signal
processing based sparsification functions on tensors. This can
be used on weights, gradients, and other tensors like the
optimizer state.
During initialization, this class requires a value for one of
`sst_top_k_element` or `sst_top_k_percent` and also requires a
value for one of `dst_top_k_element` or `dst_top_k_percent`.
This class only handles tensor inputs and outputs. We leave
state_dict type of data handling to upper layer functions.
Args:
algo (Algo):
The algorithm used. Default: FFT
sst_top_k_dim (int, optional):
The dimension on which the top-k is done for SST.
E.g. -1 is the last dim. None means flatten and top-k on all dims.
There is no way to specify multiple dims other than None.
Default: -1
sst_top_k_element (int, optional):
Number of top-k elements to retain for SST. Default: None
sst_top_k_percent (float, optional):
Percent of top-k elements to retain for SST. Default: None
dst_top_k_dim (int, optional):
The dimension on which the top-k is done for DST.
E.g. -1 is the last dim. None means flatten and top-k on all dims.
There is no way to specify multiple dims other than None.
Default: None
dst_top_k_element (int, optional):
Number of top-k elements to retain for DST. Default: None
dst_top_k_percent (float, optional):
Percent of top-k elements to retain for DST. Default: None
Example:
.. code-block:: python
sparser_2d = SignalSparsity(sst_top_k_element=10, dst_top_k_element=1)
sst = sparser_2d.dense_to_sst(linear.weight.data)
sparser_3d = SignalSparsity(algo=Algo.FFT, sst_top_k_dim=None, dst_top_k_dim=-1, sst_top_k_percent=10, dst_top_k_element=100)
conv.weight.data, _, _ = sparser_3d.lossy_compress(conv.weight.data)
"""
def __init__(
self,
algo: Algo = Algo.FFT,
sst_top_k_dim: Optional[int] = -1,
sst_top_k_element: Optional[int] = None,
sst_top_k_percent: Optional[float] = None,
dst_top_k_dim: Optional[int] = -1,
dst_top_k_element: Optional[int] = None,
dst_top_k_percent: Optional[float] = None,
) -> None:
self._sst_top_k_dim = sst_top_k_dim
self._sst_top_k_element = sst_top_k_element
self._sst_top_k_percent = sst_top_k_percent
self._dst_top_k_dim = dst_top_k_dim
self._dst_top_k_element = dst_top_k_element
self._dst_top_k_percent = dst_top_k_percent
self._validate_conf()
self._transform, self._inverse_transform = (
(_fft_transform, _ifft_transform) if algo is Algo.FFT else (_dct_transform, _idct_transform)
)
@property
def _sst_enabled(self) -> bool:
"""True if SST is enabled."""
return self._sst_top_k_element is not None or self._sst_top_k_percent is not None
@property
def _dst_enabled(self) -> bool:
"""True if DST is enabled."""
return self._dst_top_k_element is not None or self._dst_top_k_percent is not None
def _validate_conf(self) -> None:
"""Validates that the config is valid.
This includes asserting the following:
1. At most one of top_k_element and top_k_percent is set for each of SST and DST.
2. Element and percentage values, when set, are in valid ranges.
Throws:
ValueError:
If validation fails.
"""
# assert that both top_k_elements and top_k_percent aren't set for sst and dst
def both_set(a: Optional[int], b: Optional[float]) -> bool:
return (a is not None) and (b is not None)
if both_set(self._sst_top_k_element, self._sst_top_k_percent) or both_set(
self._dst_top_k_element, self._dst_top_k_percent
):
raise ValueError(
"top_k_element and top_k_percent can't be both set\n"
f"Input values are: sst element={self._sst_top_k_element}, sst percent={self._sst_top_k_percent}, "
f"dst element={self._dst_top_k_element}, dst percent={self._dst_top_k_percent}"
)
# assert that, if top_k_percent is not None, it is a valid number for a percentage.
def none_or_in_range(a: Optional[float]) -> bool:
return a is None or (0.0 < a <= 100.0)
if not (none_or_in_range(self._sst_top_k_percent) and none_or_in_range(self._dst_top_k_percent)):
raise ValueError(
"top_k_percent values for sst and dst have to be in the interval (0, 100].\n"
f"Input values are: sst percent={self._sst_top_k_percent}, dst percent={self._dst_top_k_percent}"
)
def none_or_greater_0(a: Optional[int]) -> bool:
return a is None or (0 < a)
if not (none_or_greater_0(self._sst_top_k_element) and none_or_greater_0(self._dst_top_k_element)):
raise ValueError(
"top_k_element values for sst and dst have to be greater than 0.\n"
f"Input values are: sst element={self._sst_top_k_element} "
f"and dst element={self._dst_top_k_element}"
)
def dense_to_sst(self, dense: Tensor) -> Optional[Tensor]:
"""Get Signal Sparse Tensor (SST) from a dense tensor
Dense -> fft -> top-k -> results.
The input dense tensor is transformed using a transform algorithm according to the `algo`
initialization argument. The SST is then generated from the top_k_elements
(or the top_k_percentage) of values from the transformed tensor along the 'sst_top_k_dim'.
Args:
dense (Tensor):
Input dense tensor (no zeros).
Returns:
(Tensor, optional):
Same shaped tensor as the input dense tensor, still in dense format but in frequency
domain (complex valued) and has zeros.
"""
if not self._sst_enabled:
# Special case, SST is simply None, which represents an all-zero tensor.
return None
top_k_total_size = _top_k_total_size(dense, self._sst_top_k_dim)
k = _get_k_for_topk(self._sst_top_k_percent, self._sst_top_k_element, top_k_total_size)
dense_freq = self._transform(dense, dim=self._sst_top_k_dim)
# NOTE: real_dense_freq can potentially be magnitude of complex frequency components
# or DCT transformed components when using DCT (currently not implemented).
# TODO: In case of the FFT, the imaginary part can perhaps be quantized or pruning can be
# done on the smaller phases.
real_dense_freq = dense_freq.real.abs()
return _scatter_topk_to_sparse_tensor(real_dense_freq, dense_freq, k, dim=self._sst_top_k_dim)
def dense_sst_to_dst(self, dense: Tensor, sst: Optional[Tensor]) -> Optional[Tensor]:
"""Calculates DST from input dense and SST tensors.
dense - inverse_transform(sst)[using sst_dst_to_dense method] -> top-k -> dst
Args:
dense (Tensor):
Input dense tensor (no zeros).
sst (Tensor):
Input SST tensor (has zeros).
Returns:
(Tensor):
Same shaped tensor, still dense format but has zeros. Non-zeros are top-k delta values.
"""
if not self._dst_enabled:
# Special case, DST is simply None, which represents an all-zero tensor.
return None
if sst is None:
sst = torch.zeros_like(dense, dtype=torch.complex64)
if not (dense.shape == sst.shape):
raise ValueError("dense and sst have different shapes!")
top_k_total_size = _top_k_total_size(dense, self._dst_top_k_dim)
k = _get_k_for_topk(self._dst_top_k_percent, self._dst_top_k_element, top_k_total_size)
delta = dense - self.sst_dst_to_dense(sst) # sst_dst_to_dense(sst) returns the inverse transform here
del dense
return _scatter_topk_to_sparse_tensor(delta.abs(), delta, k, dim=self._dst_top_k_dim)
def sst_dst_to_dense(self, sst: Optional[Tensor], dst: Optional[Tensor] = None) -> Tensor:
"""From SST and DST returns a dense reconstructed tensor (RT). When argument dst=None, simply returns
the inverse transform of the SST tensor.
Args:
sst (Tensor):
Signal sparse tensor. Required argument.
dst (Tensor, optional):
Delta sparse tensor, optional.
Returns:
(Tensor):
A dense tensor in real number domain from the SST.
"""
assert not (sst is None and dst is None), "both-None-case is not useful"
if sst is None:
# Simply the delta is the reconstruction.
return dst
# Now, ifft and then add the delta.
dense_rt = torch.real(self._inverse_transform(sst, dim=self._sst_top_k_dim))
if dst is not None:
dense_rt += dst
return dense_rt
def lossy_compress(self, dense: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
"""From dense tensor to lossy reconstruction of dense tensor with the help of SST and DST
tensor calculation. If requested sparsity is zero (or top_100_percent) then simply returns
the input dense tensor as the reconstruction.
Args:
dense (Tensor):
Input dense tensor (no zeros).
Returns:
(Tuple[Tensor, Tensor, Tensor]):
A tuple of the form (lossy_reconstruction, sst, dst) with three tensors of the same
shape as the dense tensor.
"""
if _is_sparsity_zero(
dense, self._sst_top_k_percent, self._sst_top_k_element, self._sst_top_k_dim
) and _is_sparsity_zero(dense, self._dst_top_k_percent, self._dst_top_k_element, self._dst_top_k_dim):
# when sparsity is 0% for both sst and dst, the dense tensor itself is returned as the reconstructed
# tensor, sst is returned as None and dst as the dense tensor. This choice is made because with the
# returned sst=None and dst=dense, we should be able to recombine them if needed to retrieve the
# dense tensor again as: dense = inv_transform(sst) + dst, where inv_transform(sst=None) = zero_tensor
# of the same size as dense.
return dense, None, dense
else:
# depending on whether self._sst_enabled and self._dst_enabled, None SST/DST tensors can be returned
# below as well.
sst = self.dense_to_sst(dense)
dst = self.dense_sst_to_dst(dense, sst)
return self.sst_dst_to_dense(sst, dst), sst, dst
def random_sparse_mask(dense: Tensor, percent: float, dim: int) -> Tensor:
"""Get a random sparse mask
Args:
dense (Tensor):
Input dense tensor (no zeros).
percent (float):
Percent of non-zeros (0, 100].
dim (int):
Dimension on which the random sparse mask is computed.
"""
assert percent > 0 and percent <= 100, percent
rand = torch.rand_like(dense)
ones = torch.ones_like(dense)
k = _get_k_for_topk(percent, None, dense.shape[dim])
return _scatter_topk_to_sparse_tensor(rand, ones, k, dim)
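# A minimal end-to-end sketch (editor's addition, hedged): the tensor shape and top-k
# values below are arbitrary illustrative choices; only SignalSparsity, Algo, and
# random_sparse_mask defined above are assumed.
if __name__ == "__main__":
    weight = torch.randn(8, 16)
    sparser = SignalSparsity(algo=Algo.FFT, sst_top_k_element=4, dst_top_k_element=4)
    # lossy_compress returns (reconstruction, sst, dst); all three share weight's shape.
    reconstructed, sst, dst = sparser.lossy_compress(weight)
    assert reconstructed.shape == sst.shape == dst.shape == weight.shape
    print("reconstruction error:", (reconstructed - weight).norm().item())
    # A 25% random mask along the last dim keeps 4 of the 16 elements per row.
    mask = random_sparse_mask(weight, percent=25.0, dim=-1)
    print("non-zeros in mask:", int(mask.sum().item()))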
|
__version_tuple__ = (0, 0, 1)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import Tensor
class EnergyConcentrationProfile:
"""Compute "energy" concentration level for a tensor
Args:
dim (int):
The dimension to measure.
top_k_percents (List[float]):
List of percentage values. For each value, the `measure`
function will compute and return the percentage of "energy"
concentrated on that top-K percent of values in the dimension
to measure. Note, this is the opposite of the sparsity percentage.
"""
def __init__(self, dim: int, top_k_percents: List[float]) -> None:
assert isinstance(dim, int)
self.dim = dim
self.percents = []
last_p = 0.0
for p in top_k_percents:
assert isinstance(p, (int, float))
assert p > 0, p
assert p <= 100, p
assert p > last_p, f"p {p} should be larger than last_p {last_p}"
self.percents.append(float(p))
last_p = p
def measure(self, in_tensor: Tensor) -> List[Tensor]:
"""Compute and return the results.
Note, we want this function to be nonblocking and async.
Returns:
(List[Tensor])
List of tensors. Each tensor is a singleton float
that contains the energy measure for that top_k_percent.
"""
assert in_tensor.is_floating_point(), in_tensor.dtype
assert self.dim < len(in_tensor.shape), f"tensor shape {in_tensor.shape} not compatible with dim {self.dim}"
dim_size = in_tensor.shape[self.dim]
abs_tensor = in_tensor.abs()
full_energy = abs_tensor.sum()
return_tensors = []
for p in self.percents:
k = max(1, round(p / 100 * dim_size))
abs_top_k_values, _ = abs_tensor.topk(k, dim=self.dim)
return_tensors.append(abs_top_k_values.sum() / full_energy)
return return_tensors
def measure_fft(self, in_tensor: Tensor) -> List[Tensor]:
"""Like measure, but do it in FFT frequency domain."""
return self.measure(torch.fft.fft(in_tensor, dim=self.dim).real)
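# A minimal usage sketch (editor's addition, hedged): the tensor and percent values are
# illustrative; only the EnergyConcentrationProfile class defined above is assumed.
if __name__ == "__main__":
    profile = EnergyConcentrationProfile(dim=-1, top_k_percents=[10.0, 50.0, 100.0])
    # Each returned entry is a singleton tensor: the fraction of absolute-value "energy"
    # captured by that top-k percent of entries along the measured dimension.
    print([m.item() for m in profile.measure(torch.randn(4, 32))])
    print([m.item() for m in profile.measure_fft(torch.randn(4, 32))])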
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import sys
from typing import List
# Check for user requirements before we import our code.
try:
import pygit2
except ImportError:
print("Error: please pip install pygit2 module to use wgit")
sys.exit(1)
try:
import pgzip
except ImportError:
print("Error: please pip install pgzip module to use wgit")
sys.exit(1)
from .repo import Repo
from .signal_sparsity import Algo, SignalSparsity, random_sparse_mask
from .signal_sparsity_profiling import EnergyConcentrationProfile
from .version import __version_tuple__
__version__ = ".".join([str(x) for x in __version_tuple__])
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from typing import List
from . import Repo, version
def main(argv: List[str] = None) -> None:
desc = "WeiGit: A git-like tool for model weight tracking"
# top level parser and corresponding subparser
parser = argparse.ArgumentParser(description=desc)
subparsers = parser.add_subparsers(dest="command")
# Version
version_parser = subparsers.add_parser("version", description="Display version")
version_parser.set_defaults(command="version", subcommand="")
# Repo
init_parser = subparsers.add_parser("init", description="Initialize a weigit repo")
init_parser.add_argument("init", action="store_true", help="initialize the repo")
status_parser = subparsers.add_parser("status", description="Shows the repo's current status")
status_parser.add_argument("status", action="store_true", help="Show the repo's current status")
add_parser = subparsers.add_parser("add", description="add a file to the staged changeset (default: none)")
add_parser.add_argument(
"add",
default="",
type=str,
metavar="FILE_PATH",
help="add a file to the staged changeset (default: none)",
)
add_parser.add_argument(
"--no_per_tensor",
action="store_true",
help="Disable per-tensor adding of a file",
)
commit_parser = subparsers.add_parser("commit", description="Commits the staged changes")
commit_parser.add_argument("commit", action="store_true", help="Commit the staged changes")
commit_parser.add_argument(
"-m",
"--message",
default="",
type=str,
metavar="MESSAGE",
required=True,
help="commit message",
)
checkout_parser = subparsers.add_parser("checkout", description="checkout from a commit")
checkout_parser.add_argument(
"checkout",
default="",
type=str,
metavar="FILE_SHA1",
help="checkout from a commit",
)
log_parser = subparsers.add_parser("log", description="Show the history log of the repo or optionally of a file.")
log_parser.add_argument("log", action="store_true", help="Show the repo's history log")
log_parser.add_argument(
"-f",
"--file",
default="",
type=str,
metavar="FILE_PATH",
help="Show the history log of a file",
)
args = parser.parse_args(argv)
if args.command == "init":
repo = Repo(Path.cwd(), init=True)
if args.command == "add":
repo = Repo(Path.cwd())
repo.add(args.add, per_tensor=not args.no_per_tensor)
if args.command == "status":
repo = Repo(Path.cwd())
out = repo.status()
print(out)
if args.command == "log":
repo = Repo(Path.cwd())
repo.log(args.file)
if args.command == "commit":
repo = Repo(Path.cwd())
repo.commit(args.message)
if args.command == "checkout":
repo = Repo(Path.cwd())
repo.checkout(args.checkout)
if args.command == "version":
print(".".join([str(x) for x in version.__version_tuple__]))
if __name__ == "__main__":
main()
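# Editor's hedged usage notes: assuming this module is exposed as a `wgit` console script
# (the script name is an assumption), the subcommands defined above mirror git:
#   wgit init
#   wgit add checkpoint.pt            # or add --no_per_tensor to disable per-tensor adding
#   wgit status
#   wgit commit -m "epoch 1"
#   wgit log -f checkpoint.pt
#   wgit checkout <FILE_SHA1>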
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
class ExitCode(Enum):
"""Collections of the Exit codes as an Enum class"""
CLEAN = 0
FILE_EXISTS_ERROR = 1
FILE_DOES_NOT_EXIST_ERROR = 2
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
from dataclasses import dataclass
from enum import Enum
import json
from pathlib import Path
import sys
from typing import Any, Dict, List, Optional, Union
import torch
from torch import Tensor
from .pygit import PyGit
from .sha1_store import SHA1_Store
# This is a fixed dir name we use for sha1_store. It should not be changed
# for backward compatibility reasons.
SHA1_STORE_DIR_NAME = "sha1_store"
# These are on-disk keys. Don't modify for backward compatibility.
SHA1_KEY = "SHA1"
LAST_MODIFIED_TS_KEY = "last_modified_time_stamp"
REL_PATH_KEY = "file_path" # this will be removed from the json since it is redundant.
class RepoStatus(Enum):
"""Repo Statuses"""
CLEAN = 1
CHANGES_NOT_ADDED = 2
CHANGES_ADDED_NOT_COMMITED = 3
@dataclass
class SizeInfo:
"""Size info for a file or the repo in bytes.
Deduped size can't be disabled. So it is always performed.
Both sparsified and gzipped are optional. They are applied in the following
order if both are enabled:
sparsify -> gzip
Therefore, original >= deduped >= sparsified >= gzipped
"""
original: int
deduped: int
sparsified: int
gzipped: int
@dataclass
class _SHA1_Tensor:
"""Representing a tensor using sha1(s) from SHA1 store.
It can be either a dense one or two sparse one (SST and DST).
"""
is_dense: bool = True
dense_sha1: str = ""
sst_sha1: str = ""
dst_sha1: str = ""
def _recursive_apply_to_elements(data: Union[List[Any], Dict[str, Any]], fn: Any, names: List[str]) -> None:
"""Helper function to traverse a dict recursively and apply a function to leafs.
Args:
data (dict or list):
A dict or a list and it should only contain dict and list.
fn (Any):
A call back function on each element. Signature:
fn(element: Any, names: List[str]) -> Any
names (list):
Stack of names for making the element path.
"""
if isinstance(data, list):
for i, _ in enumerate(data):
names.append(str(i))
if isinstance(data[i], (list, dict)):
_recursive_apply_to_elements(data[i], fn, names)
else:
data[i] = fn(data[i], names)
names.pop()
elif isinstance(data, dict):
for key in data.keys():
names.append(str(key))
if isinstance(data[key], (list, dict)):
_recursive_apply_to_elements(data[key], fn, names)
else:
data[key] = fn(data[key], names)
names.pop()
else:
assert False, f"Unexpected data type: {type(data)}"
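# Editor's hedged illustration: given data = {"w": [1, 2], "b": 3} and
# fn = lambda x, names: x * 10, calling _recursive_apply_to_elements(data, fn, [])
# mutates data in place to {"w": [10, 20], "b": 30}; at each leaf, `names` holds the
# path so far, e.g. ["w", "0"] for the first list element.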
class Repo:
"""
Represents the WeiGit repo for tracking neural network weights and their versions.
A WeiGit repo is like a git repo. It is a dir, in which a .wgit dir exists to keep
track of the content.
Args:
parent_dir (Path, str):
Parent dir in which to make or to load a .wgit dir.
Default: "", which means CWD.
init (bool, optional):
- If ``True``, initializes a new WeiGit repo in the parent_dir. Initialization
creates a `.wgit` directory within the <parent_dir>, triggers an initialization
of a sha1_store in the ./<parent_dir>/.wgit directory, and makes the
./<parent_dir>/.wgit a git repository through git initialization.
- If ``False``, a new WeiGit repo is not initialized and the existing repo is
wrapped, populating the `_wgit_parent` and other internal attributes.
- Default: False
"""
def __init__(self, parent_dir: Union[Path, str] = "", init: bool = False) -> None:
# Set _wgit_parent.
self._wgit_parent = Path(parent_dir if parent_dir != "" else Path.cwd())
# Set _dot_wgit_dir_path.
self._dot_wgit_dir_path: Optional[Path] = None
exists = self._recursive_search_and_may_init_dot_wgit_dir_path(self._wgit_parent)
if not exists and init:
# No weigit repo exists and is being initialized with init=True
# Make .wgit directory, create sha1_store
self._dot_wgit_dir_path = self._wgit_parent.joinpath(".wgit")
self._dot_wgit_dir_path.mkdir(parents=False, exist_ok=True)
# Initializing sha1_store only after wgit has been initialized!
self._sha1_store = SHA1_Store(self._dot_wgit_dir_path.joinpath(SHA1_STORE_DIR_NAME), init=True)
# Create a git repo for the metadata versioning.
self._pygit = PyGit(self._dot_wgit_dir_path, gitignore=[SHA1_STORE_DIR_NAME])
elif exists:
# Weigit repo already exists, populate this object.
assert self._dot_wgit_dir_path is not None
self._sha1_store = SHA1_Store(self._dot_wgit_dir_path.joinpath(SHA1_STORE_DIR_NAME))
self._pygit = PyGit(self._dot_wgit_dir_path)
else:
# weigit doesn't exist and is not trying to be initialized (triggers
# during non-init commands)
sys.stderr.write("fatal: not a wgit repository!\n")
sys.exit(1)
# We are done init. Do a check.
self._sanity_check()
def _recursive_search_and_may_init_dot_wgit_dir_path(self, check_dir: Path) -> bool:
"""Search for a wgit repo top level dir, starting potentially from a subdir of a repo.
This may set the self._dot_wgit_dir_path if a repo is found.
Args:
check_dir (Path):
Path to the directory from where search is started.
Returns:
Returns True if a repo is found.
"""
assert self._dot_wgit_dir_path is None, f"_dot_wgit_dir_path is already set to {self._dot_wgit_dir_path}"
if self._weigit_repo_exists(check_dir):
self._dot_wgit_dir_path = check_dir.joinpath(".wgit")
else:
root = Path(check_dir.parts[0])
while check_dir != root:
check_dir = check_dir.parent
if self._weigit_repo_exists(check_dir):
self._dot_wgit_dir_path = check_dir.joinpath(".wgit")
break
return self._dot_wgit_dir_path is not None
def _weigit_repo_exists(self, check_dir: Path) -> bool:
"""Returns True if a valid WeiGit repo exists in the path: check_dir."""
wgit_exists, git_exists, gitignore_exists = self._weigit_repo_file_check(check_dir)
return wgit_exists and git_exists and gitignore_exists
def _weigit_repo_file_check(self, check_dir: Path) -> tuple:
"""Returns a tuple of booleans corresponding to the existence of each
internally required .wgit file.
"""
wgit_exists = check_dir.joinpath(".wgit").exists()
git_exists = check_dir.joinpath(".wgit/.git").exists()
gitignore_exists = check_dir.joinpath(".wgit/.gitignore").exists()
return wgit_exists, git_exists, gitignore_exists
def _sanity_check(self) -> None:
"""Helper to check if on-disk state matches what we expect."""
if not self._weigit_repo_exists(self._wgit_parent):
sys.stderr.write("fatal: no wgit repo exists!\n")
sys.exit(1)
def add(
self,
in_file_path: str,
per_tensor: bool = True,
gzip: bool = True,
sparsify: bool = False,
sparsify_policy: Any = None,
) -> Optional[Dict[Any, Any]]:
"""Add a file to the wgit repo.
This could be a new file or a modified file. Adding an unmodified, existing file
is allowed but it is a noop.
Args:
in_file_path (str):
Path to the file to be added.
per_tensor (bool, optional):
Add a file in a per-tensor fashion. This enables more deduplication
due to tensors being identical. Deduplication cannot be disabled
completely because we use a content addressable SHA1_Store class.
Default: True
gzip (bool, optional):
Enable gzip based lossless compression on the object being added.
Default: True
sparsify (bool, optional):
Enable sparsify for the tensors, which is going to modify the values
for all or some tensors, i.e. lossy compression.
Default: False
sparsify_policy (Any):
TODO (Min): need to add a callback function to control which tensors
and how to sparsify.
Default: None
Returns:
(Dict, optional)
None if the content is added but not modified with lossy compression.
Otherwise, returns a state_dict that contains the modified Tensors to
be loaded back into the model, which means the tensors are dense, not
SST and DST tensors.
"""
self._sanity_check()
if sparsify and not per_tensor:
raise ValueError("Only support sparsity when per_tensor is true")
# Create the corresponding metadata file or load it if the file is
# not a newly added file.
file_path = Path(in_file_path)
rel_file_path = self._rel_file_path(file_path)
metadata_file = self._process_metadata_file(rel_file_path)
# Add the file to the sha1_store.
ret_state_dict = None
file_path_or_state_dict: Union[Path, Dict] = file_path
# TODO (Min): We don't add parent sha1 tracking to the sha1 store because
# de-duplication & dependency tracking can create cycles.
# We need to figure out a way to handle deletion.
# TODO (Min): We don't detect changes and compute delta on a modified file
# yet. Need to figure out a method for delta tracking.
if per_tensor:
def fn(element: Any, names: List[str]) -> Any:
"""Callback on each leaf object for _recursive_apply_to_elements below."""
if isinstance(element, Tensor):
if sparsify:
# TODO (Min): here we will optionally do SST/DST and add those
# tensors with sparsity.
# Remember to update ret_state_dict
raise NotImplementedError()
sha1 = self._sha1_store.add(element, compress=gzip, name=".".join(names))
return _SHA1_Tensor(is_dense=True, dense_sha1=sha1)
else:
return element
state_dict = torch.load(file_path)
ret_state_dict = copy.deepcopy(state_dict) # This is only a temporary addition for testing.
_recursive_apply_to_elements(state_dict, fn, [])
file_path_or_state_dict = state_dict
# Add this top-level object.
sha1 = self._sha1_store.add(file_path_or_state_dict, compress=gzip)
# write metadata to the metadata-file
self._write_metadata(metadata_file, file_path, sha1)
self._pygit.add() # add to the .wgit/.git repo
return ret_state_dict
def commit(self, message: str) -> None:
"""Commits staged changes to the repo.
Args:
message (str):
The commit message to be added.
"""
self._sanity_check()
# TODO (Min): make commit message a json for better handling of metadata like step count,
# LR, sparsity level, etc.
self._pygit.commit(message)
def size_info(self, path: Optional[str] = None) -> SizeInfo:
"""Get size info for a file or the whole repo.
For the whole repo, just call size_info from sha1_store.
For a file, needs to open the metadata and find the sha1 and then
for per_tensor state_dict, collect size_info on all objects.
TODO (Min): it is not exactly clear that this can be computed precisely
given delta encoding and deduplication between objects.
Args:
path (str, optional):
File path for the query. If None, return whole repo's info.
Default: None
Returns:
(SizeInfo):
The dataclass that contains the size info.
"""
raise NotImplementedError()
def status(self) -> Dict[str, RepoStatus]:
"""Show the state of the weigit working tree.
State can be
1. dirty with changes/modifications not added to weigit repo.
2. dirty with a file changes added but not committed.
3. clean and tracking files after a change has been committed,
or clean with an empty repo.
TODO (Min): this needs to return repo status and dirty files and untracked
files too.
Returns:
(dict):
A dict keyed with files and their status.
"""
self._sanity_check()
pygit_status = self._pygit.status()
status = self._get_metdata_files()
if status:
out_status = dict()
for metadata_file, is_modified in status.items():
# if metadata_file is among the keys of pygit_status dict, it has not been committed to git yet.
if is_modified:
out_status[str(metadata_file)] = RepoStatus.CHANGES_NOT_ADDED
elif not is_modified and metadata_file in pygit_status.keys():
out_status[str(metadata_file)] = RepoStatus.CHANGES_ADDED_NOT_COMMITED
elif not is_modified and metadata_file not in pygit_status.keys():
out_status[str(metadata_file)] = RepoStatus.CLEAN
return out_status
else: # if status dict is empty, nothing has been added so far.
return {"": RepoStatus.CLEAN} # sub case of case-3, clean with an empty repo
def log(self, file: str) -> None:
"""Returns the WeiGit log of commit history.
Args:
file (str, optional):
Show the log of the commit history of the repo. Optionally, show
the log history of a specific file.
"""
self._sanity_check()
# TODO (Min): this should return a list of sha1 for the history as well as
# each commit's message, which could be a dict from json commit msg.
if file:
print(f"wgit log of the file: {file}")
else:
print("wgit log")
def checkout(self, sha1: str) -> None:
"""Checkout a previously commited version of the checkpoint.
Args:
sha1 (str):
The sha1 hash of the file version to checkout.
"""
self._sanity_check()
raise NotImplementedError()
def checkout_by_steps(self) -> None:
"""Not Implemented: Checkout by step count of the train process"""
self._sanity_check()
raise NotImplementedError()
    def _get_metadata_files(self) -> Dict[str, bool]:
        """Walk the directories that contain the metadata files and check the
        status of those files, i.e. whether they have been modified or not.
        The returned Dict[str, bool] maps a file path (as a string) to whether that file is modified.
        """
metadata_d = dict()
for file in self._dot_wgit_dir_path.iterdir(): # iterate over the .wgit directory
# exclude all the .wgit files and directory
if file.name not in {"sha1_store", ".git", ".gitignore"}:
# perform a directory walk on the metadata_file directories to find the metadata files
for path in file.rglob("*"):
if path.is_file():
rel_path = str(path.relative_to(self._dot_wgit_dir_path)) # metadata path relative to .wgit dir
metadata_d[rel_path] = self._is_file_modified(path)
return metadata_d
def _is_metadata_file(self, file: Path) -> bool:
"""Checks whether a file is a valid metadata file by matching keys and
checking if it has valid json data.
"""
try:
with open(file) as f:
metadata = json.load(f)
is_metadata = set(metadata.keys()) == {SHA1_KEY, LAST_MODIFIED_TS_KEY, REL_PATH_KEY}
except json.JSONDecodeError:
return False # not a json file, so not valid metadata file
return is_metadata
def _is_file_modified(self, file: Path) -> bool:
"""Checks whether a file has been modified since its last recorded modification
time recorded in the metadata_file.
"""
with open(file) as f:
data = json.load(f)
# Get the last modified timestamp recorded by weigit and the current modified
# timestamp. If not the same, then file has been modified since last weigit
# updated metadata.
last_mod_timestamp = data[LAST_MODIFIED_TS_KEY]
curr_mod_timestamp = Path(data[REL_PATH_KEY]).stat().st_mtime
        return curr_mod_timestamp != last_mod_timestamp
def _process_metadata_file(self, metadata_fname: Path) -> Path:
"""Create a metadata_file corresponding to the file to be tracked by weigit if
the first version of the file is encountered. If a version already exists, open
the file and get the sha1_hash of the last version as parent_sha1.
"""
metadata_file = self._dot_wgit_dir_path.joinpath(metadata_fname)
metadata_file.parent.mkdir(parents=True, exist_ok=True) # create parent dirs for metadata file
if not metadata_file.exists() or not metadata_file.stat().st_size:
metadata_file.touch()
else:
with open(metadata_file, "r") as f:
ref_data = json.load(f)
return metadata_file
def _write_metadata(self, metadata_file: Path, file_path: Path, sha1: str) -> None:
"""Write metadata to the metadata file"""
change_time = Path(file_path).stat().st_mtime
metadata = {
SHA1_KEY: sha1,
LAST_MODIFIED_TS_KEY: change_time,
REL_PATH_KEY: str(file_path),
}
with open(metadata_file, "w", encoding="utf-8") as f:
json.dump(metadata, f, ensure_ascii=False, indent=4)
def _rel_file_path(self, filepath: Path) -> Path:
"""Find the relative part to the filepath from the current working
directory and return the relative path.
"""
# get the absolute path
filepath = filepath.resolve()
# using zipped loop we get the path common to the filepath and cwd
for i, (x, y) in enumerate(zip(filepath.parts, Path.cwd().parts)):
pass
# return the relative part (path not common to cwd)
return Path(*filepath.parts[i:])
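# A minimal usage sketch of this repo class (illustrative only; the class name
# `Repo`, its constructor arguments and the file name "checkpoint.pt" are
# assumptions for this example, with the `add` keywords taken from the method
# body above):
#
#     repo = Repo(Path.cwd(), init=True)
#     repo.add("checkpoint.pt", per_tensor=True, gzip=True)
#     repo.commit("checkpoint after epoch 10")
#     print(repo.status())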
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import subprocess
import sys
from typing import Dict, List, Tuple
import pygit2
class PyGit:
"""
PyGit class represents a git repo within a weigit repo.
Args:
parent_path (pathlib.Path)
Has to be the full path of the parent!
gitignore (List)
a list of files to be added to the .gitignore
name (str)
Name of the author of the git repo. Optionally used if it can't be determined from user's .gitconfig.
email (str)
email address of the author of the git repo
"""
def __init__(
self,
parent_path: Path,
gitignore: List = list(),
name: str = "user",
email: str = "[email protected]",
) -> None:
# Find if a git repo exists within .wgit repo:
# If exists: then discover it and set the self.gitrepo path to its path
self._parent_path = parent_path
git_repo_found = pygit2.discover_repository(self._parent_path)
# If gitconfig file exists use the name and email from the file
self.name, self.email = self._set_author_config(name, email)
if git_repo_found:
# grab the parent dir of this git repo
git_repo = Path(pygit2.discover_repository(self._parent_path))
pygit_parent_p = git_repo.parent.absolute()
            # Check if the parent dir is a .wgit dir. If the .wgit dir is a git repo,
            # just wrap the existing git repo with the pygit2.Repository class.
if pygit_parent_p == self._parent_path:
self.repo = pygit2.Repository(str(self._parent_path))
self.path = self._parent_path.joinpath(".git")
else:
# if the parent is not a .wgit repo,
# then the found-repo is a different git repo. Init new .wgit/.git
self._init_wgit_git(gitignore)
else:
# no git repo found, make .wgit a git repo
self._init_wgit_git(gitignore)
def _init_wgit_git(self, gitignore: List) -> None:
"""
Initializes a .git within .wgit directory, making it a git repo.
Args:
gitignore (List)
a list of file paths to be ignored by the wgit git repo.
"""
self.repo = pygit2.init_repository(str(self._parent_path), False)
self.path = self._parent_path.joinpath(".git")
# create and populate a .gitignore
self._parent_path.joinpath(".gitignore").touch(exist_ok=False)
with open(self._parent_path.joinpath(".gitignore"), "a") as file:
for item in gitignore:
file.write(f"{item}\n")
def add(self) -> None:
"""
        git-add all the untracked files not in gitignore to the .wgit/.git repo.
"""
# If .wgit is git repo, add all the files in .wgit not being ignored to git
        # TODO: Add functionality for adding specific files as well as all files.
if self._exists:
self.repo.index.add_all()
self.repo.index.write()
else:
sys.stderr.write("fatal: git repo does not exist")
def commit(self, message: str) -> None:
"""
git commit the staged changes to the .wgit/.git repo.
Args:
message (str)
Commit message
"""
# If .wgit is git repo, commit the staged files to git
if self._exists:
# if no commit exists, set ref to HEAD and parents to empty
try:
ref = self.repo.head.name
parents = [self.repo.head.target]
except pygit2.GitError:
ref = "HEAD"
parents = []
author = pygit2.Signature(self.name, self.email)
committer = pygit2.Signature(self.name, self.email)
tree = self.repo.index.write_tree()
self.repo.create_commit(ref, author, committer, message, tree, parents)
@property
def _exists(self) -> bool:
"""returns True if wgit is a git repository"""
return self._parent_path == Path(self.repo.path).parent
@property
def _path(self) -> str:
"""returns the path of the git repository PyGit is wrapped around"""
return self.repo.path
def status(self) -> Dict:
"""Gathers the status of the git repo within wgit and returns a dictionary detailing the status.
The dictionary contains the relative paths of the metadata files as keys and the values represent
the status of the file in the form of an int number as status codes. These status codes are
elaborated within PyGit2's documentation: https://www.pygit2.org/index_file.html#status and
https://github.com/libgit2/pygit2/blob/320ee5e733039d4a3cc952b287498dbc5737c353/src/pygit2.c#L312-L320
Returns: {"relative path to a file" : pygit2 status codes}
"""
status_dict = self.repo.status()
tracking_dict = dict(filter(lambda item: item[1] != pygit2.GIT_STATUS_IGNORED, status_dict.items()))
return tracking_dict
def _set_author_config(self, name: str, email: str) -> Tuple[str, str]:
"""Set the name and email for the pygit repo collecting from the gitconfig.
If not available in gitconfig, set the values from the passed arguments."""
gitconfig = Path("~/.gitconfig").expanduser()
# parse the .gitconfig file for name and email
try:
set_name = subprocess.run(["git", "config", "user.name"], capture_output=True, text=True).stdout.rstrip()
set_email = subprocess.run(["git", "config", "user.email"], capture_output=True, text=True).stdout.rstrip()
if not set_name or not set_email:
set_name = name
set_email = email
except BaseException:
set_name = name
set_email = email
return set_name, set_email
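# A minimal usage sketch (illustrative only; the .wgit path below is a made-up
# example and must already exist as a directory):
#
#     pygit = PyGit(Path("/tmp/demo/.wgit").resolve(), gitignore=["sha1_store"])
#     pygit.add()                     # stage all non-ignored files under .wgit
#     pygit.commit("initial commit")  # commit the staged metadata files
#     print(pygit.status())           # {relative file path: pygit2 status code}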
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from .cli import main
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import json
import logging
import os
from pathlib import Path
import pickle
import shutil
import sys
import tempfile
import time
from typing import Any, Dict, Optional, Tuple, Union, cast
import pgzip
import torch
from torch import Tensor
from fairscale.internal.containers import from_np, to_np
from .utils import ExitCode
#
# Const string keys for json file. Do not change for backward compatibilities.
#
# For each object entry in the metadata json file.
ENTRY_RF_KEY = "ref_count" # int, reference count for this object.
ENTRY_COMP_KEY = "compressed" # bool, is compressed or not.
ENTRY_OS_KEY = "original_size" # int, original size for all identical objects mapped to this object.
ENTRY_DS_KEY = "deduped_size" # int, size after deduplication (always enabled).
ENTRY_CS_KEY = "compressed_size" # int, size after gzip compression, if enabled.
ENTRY_NAMES_KEY = "names" # dict, names of objects and their count mapped to this object.
# For the entire store in the metadata json file.
STORE_CREATE_DATE_KEY = "created_on" # str, when is the store created.
STORE_OS_KEY = "original_size" # int, original size for all objects added.
STORE_DS_KEY = "deduped_size" # int, size after deduplication (always enabled).
STORE_CS_KEY = "compressed_size" # int, size after gzip compression, if enabled on any object within the store.
def _get_json_entry(d: Dict[str, Any]) -> Dict[str, Any]:
"""Get a dict from a json entry.
This fills in any missing entries in case we load an older version
json file from the disk.
"""
    for int_key_init_zero in [ENTRY_RF_KEY, ENTRY_OS_KEY, ENTRY_DS_KEY, ENTRY_CS_KEY]:
if int_key_init_zero not in d.keys():
d[int_key_init_zero] = 0
for bool_key_init_false in [ENTRY_COMP_KEY]:
if bool_key_init_false not in d.keys():
d[bool_key_init_false] = False
for dict_key_init_empty in [ENTRY_NAMES_KEY]:
if dict_key_init_empty not in d.keys():
d[dict_key_init_empty] = {}
return d
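# For example (illustrative only), an entry loaded from an older json file that
# only has a ref count gets the remaining fields filled in with defaults:
#
#     _get_json_entry({"ref_count": 2})
#     # -> {"ref_count": 2, "original_size": 0, "deduped_size": 0,
#     #     "compressed_size": 0, "compressed": False, "names": {}}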
def _copy_compressed(src: Path, dest: Path, thread: Optional[int], blocksize: int) -> Tuple[int, int]:
"""Helper to copy a file and compress it at the same time.
Returns:
(int, int):
original size and compressed size in bytes.
"""
with open(str(src), "rb") as srcf:
with pgzip.open(str(dest), "wb", compresslevel=5, thread=thread, blocksize=blocksize) as destf:
while True:
buf = srcf.read(blocksize)
if len(buf) == 0:
break
destf.write(buf)
orig, comp = Path(src).stat().st_size, Path(dest).stat().st_size
assert orig >= comp or comp < 1 * 1024 * 1024, f"Compressed size {comp} > original {orig} for large data"
return orig, comp
def _copy_uncompressed(src: Path, dest: Path, thread: Optional[int], blocksize: int) -> None:
"""Helper to copy a file and uncompress it at the same time."""
with open(str(dest), "wb") as destf:
with pgzip.open(str(src), "rb", thread=thread, blocksize=blocksize) as srcf:
while True:
buf = srcf.read(blocksize)
if len(buf) == 0:
break
destf.write(buf)
class _JSON_DictContext:
"""Helper class that handles syncing of a json and a dict."""
def __init__(self, s: "SHA1_Store", readonly: bool) -> None:
self._s = s
self._readonly = readonly
def __enter__(self) -> None:
"""Load from file."""
assert self._s._json_dict is None
if self._s._metadata_file_path.exists():
with open(self._s._metadata_file_path, "r") as f:
self._s._json_dict = json.load(f)
else:
self._s._json_dict = {}
def __exit__(self, exc_type: Any, exc_value: Any, exc_traceback: Any) -> None:
"""Store back to file."""
assert isinstance(self._s._json_dict, dict)
if not self._readonly:
with open(self._s._metadata_file_path, "w", encoding="utf-8") as f:
json.dump(self._s._json_dict, f, ensure_ascii=False, indent=2)
self._s._json_dict = None
class SHA1_Store:
"""
This class represents a SHA1 checksum based storage dir for state_dict
and tensors.
This means the same content will not be stored multiple times, resulting
in space savings. (a.k.a. de-duplication)
    To make things easier for the callers, this class accepts input data
    as files, state_dict or tensors. This class always returns in-memory
    data, not on-disk files. This class doesn't really care about or know
    the actual data types.
A key issue is dealing with content deletion. We use a reference counting
algorithm, which means the caller must have symmetrical add/remove calls
for each object.
    We used to support a child-parent dependency graph with ref counting, but
    it is flawed since a grand-child can have the same SHA1 as the grand-parent,
    resulting in a cycle. This means the caller must compute which parent is safe
    to delete in a version tracking graph. The lesson here is that content
    addressability and dependency graphs do not mix well.
    We support multicore compression for the data to be stored, on a per-object
    basis. We use pgzip, which does parallel gzip compression/decompression, to
    make use of all the cores.
Args:
path (Path):
The path in which a SHA1_Store will be created.
init (bool, optional):
            - If ``True``, create a new SHA1_Store in the path if it does not already exist.
- Default: False
sha1_buf_size (int):
Buffer size used for checksumming. Default: 100MB.
tmp_dir (str):
Dir for temporary files if input is an in-memory object or output data needs
to be decompressed first.
pgzip_threads (int, optional):
Number of threads (cores) used in compression. Default: None to use all cores.
pgzip_block_size (int):
Per-thread block size for compression. Default: 10MB.
"""
def __init__(
self,
path: Path,
init: bool = False,
sha1_buf_size: int = 100 * 1024 * 1024,
tmp_dir: str = "",
pgzip_threads: Optional[int] = None,
pgzip_block_size: int = 10 * 1024 * 1024,
) -> None:
"""Create or wrap (if already exists) a store."""
self._path = path
self._sha1_buf_size = sha1_buf_size
self._pgzip_threads = pgzip_threads
self._pgzip_block_size = pgzip_block_size
# Metadata related.
self._metadata_file_path = self._path.joinpath("metadata.json")
self._json_dict: Optional[Dict[str, Any]] = None
self._json_ctx = _JSON_DictContext(self, readonly=False)
self._readonly_json_ctx = _JSON_DictContext(self, readonly=True)
# Initialize the store if not exist and if init is True.
if init and not self._path.exists():
try:
Path.mkdir(self._path, parents=False, exist_ok=False)
except FileExistsError as error:
sys.stderr.write(f"An exception occured while creating Sha1_store: {repr(error)}\n")
sys.exit(ExitCode.FILE_EXISTS_ERROR)
# Create a new json file for this new store.
with self._json_ctx:
self._json_dict = {
STORE_CREATE_DATE_KEY: time.ctime(),
STORE_OS_KEY: 0,
STORE_DS_KEY: 0,
STORE_CS_KEY: 0,
}
        # This is an internal error since the caller of this is our own wgit code.
assert (
self._path.exists() and self._metadata_file_path.exists()
), f"SHA1 store {self._path} does not exist and init is False"
# Make sure there is a valid metadata file.
with self._readonly_json_ctx:
assert STORE_CREATE_DATE_KEY in self._json_dict, f"Invalid SHA1 Store in {self._path}"
# Init temp dir.
if tmp_dir:
# Caller supplied tmp dir
assert Path(tmp_dir).is_dir(), "incorrect input"
self._tmp_dir = Path(tmp_dir)
else:
# Default tmp dir, need to clean it.
self._tmp_dir = self._path.joinpath("tmp")
shutil.rmtree(self._tmp_dir, ignore_errors=True)
self._tmp_dir.mkdir()
    def add(self, file_or_obj: Union[Path, Tensor, Dict], compress: bool = True, name: Optional[str] = None) -> str:
"""Adds a file/object to this store and the sha1 references accordingly.
First, a sha1 hash is calculated. Utilizing the sha1 hash string, the actual file
in <file_or_obj> is moved within the store and the reference file is updated.
        If the input is an object, it will be stored in self._tmp_dir first and then moved.
If compress is True, the stored file is also compressed, which is useful for tensors
with a lot of zeros.
        We use pickle and numpy for saving and loading because they are more
        deterministic in terms of serialized bytes. They do lose info on the
        device and dtype of tensors. Will handle those later.
Args:
file_or_obj (str or tensor or Dict):
Path to the file to be added to the store or an in-memory object
                that can be handled by pickle. Note, an OrderedDict is what you get when
                you call `state_dict()` on a nn.Module, and it is an instance
                of Dict too. A checkpoint can also be a plain dict, since it may
                contain both a model's state_dict and other non-tensor info.
compress (bool, optional):
Use gzip compression on this object or not.
Default: True
name (str, optional):
Optional name for this object.
Default: None
"""
start = time.time()
is_pickle_file = None
# Use `isinstance` not `type() == Path` since pathlib returns OS specific
# Path types, which inherit from the Path class.
if isinstance(file_or_obj, (Path, str)):
# Make sure it is a valid file.
try:
pickle.load(open(file_or_obj, "rb"))
is_pickle_file = True
            except Exception:
                is_pickle_file = False
file_path = Path(file_or_obj)
remove_tmp = False
if is_pickle_file is False:
            # Continue to support torch.save()'ed files too by loading them into
            # memory; the next if-condition below will then pickle the loaded object.
file_or_obj = torch.load(cast(Union[Path, str], file_or_obj))
if isinstance(file_or_obj, (Tensor, Dict)):
# Serialize the object into a tmp file.
file_path = self._get_tmp_file_path()
pickle.dump(to_np(file_or_obj), open(file_path, "wb"))
remove_tmp = True
else:
assert False, f"incorrect input {type(file_or_obj)}"
# Get SHA1 from the file.
assert isinstance(file_path, Path), type(file_path)
sha1_hash = self._get_sha1_hash(file_path)
        # Load the json once for the many metadata operations below. Loading it
        # repeatedly would be very slow.
with self._json_ctx:
# Add reference.
ref_count = self._add_ref(sha1_hash, True, compress)
if ref_count == 1: # First time adding.
# Create the file dir, if needed.
repo_fdir = self._sha1_to_dir(sha1_hash)
if not repo_fdir.exists():
try:
repo_fdir.mkdir(exist_ok=True, parents=True)
except FileExistsError as error:
sys.stderr.write(f"An exception occured: {repr(error)}\n")
sys.exit(ExitCode.FILE_EXISTS_ERROR)
# Transfer the file to the store.
repo_fpath = repo_fdir.joinpath(sha1_hash)
try:
if compress:
orig_size, comp_size = _copy_compressed(
file_path, repo_fpath, self._pgzip_threads, self._pgzip_block_size
)
else:
shutil.copy2(file_path, repo_fpath)
orig_size = comp_size = file_path.stat().st_size
except BaseException as error:
                    # Something went wrong, perhaps out of space, or a race condition due to lack of locking.
                    # TODO (Min): properly handle the error and recover when we learn more here.
                    sys.stderr.write(f"An exception occurred: {repr(error)}\n")
ref_count = self._add_ref(sha1_hash, False, compress)
# Update the sizes for this entry.
entry = _get_json_entry(self._json_dict[sha1_hash])
assert (
ref_count == 1 or entry[ENTRY_OS_KEY] % (ref_count - 1) == 0
), f"incorrect size: {entry[ENTRY_OS_KEY]} and {ref_count}"
o_diff = orig_size if ref_count == 1 else (entry[ENTRY_OS_KEY] // (ref_count - 1))
d_diff = orig_size if ref_count == 1 else 0
c_diff = comp_size if ref_count == 1 else 0
entry[ENTRY_OS_KEY] += o_diff
entry[ENTRY_DS_KEY] += d_diff
entry[ENTRY_CS_KEY] += c_diff
# Update whole store's stats.
self._json_dict[STORE_OS_KEY] += o_diff
self._json_dict[STORE_DS_KEY] += d_diff
self._json_dict[STORE_CS_KEY] += c_diff
# Update the name list for this entry.
if name:
if name not in entry[ENTRY_NAMES_KEY].keys():
entry[ENTRY_NAMES_KEY][name] = 1
else:
entry[ENTRY_NAMES_KEY][name] += 1
# Clean up if needed.
if remove_tmp:
file_path.unlink()
duration = time.time() - start
if duration > 60:
logging.warning(f"Add() is taking long: {duration}s")
return sha1_hash
def get(self, sha1: str) -> Union[Tensor, Dict]:
"""Get data from a SHA1
Args:
sha1 (str):
SHA1 of the object to get.
Returns:
(Tensor or Dict):
In-memory object.
Throws:
ValueError if sha1 is not found.
"""
path = self._sha1_to_dir(sha1).joinpath(sha1)
if not path.exists():
            # This is potentially a valid case for the caller; we need to inform
            # the caller about it.
raise ValueError(f"Try to get SHA1 {sha1} but it is not found")
        # Directly return the object after loading it. This could throw an
        # exception, but that would indicate an internal error since we should never
        # have stored an invalid object in the first place with the add() API.
#
# TODO (Min): we could also keep a stats in the meta data on how many
# times the object is read. Will add if that's needed.
with self._readonly_json_ctx:
if self._json_dict[sha1][ENTRY_COMP_KEY]:
# Compressed. Because pgzip doesn't support tell() yet, we need to
# uncompress into a temp file and return it.
tmp = self._get_tmp_file_path()
_copy_uncompressed(path, tmp, self._pgzip_threads, self._pgzip_block_size)
obj = pickle.load(open(tmp, "rb"))
tmp.unlink()
else:
# Uncompressed.
obj = pickle.load(open(path, "rb"))
return from_np(obj)
def delete(self, sha1: str) -> None:
"""Delete a SHA1
Args:
sha1 (str):
SHA1 of the object to delete.
Throws:
ValueError if sha1 is not found.
"""
path = self._sha1_to_dir(sha1).joinpath(sha1)
if not path.exists():
            # This is potentially a valid case for the caller; we need to inform
            # the caller about it.
raise ValueError(f"Try to delete SHA1 {sha1} but it is not found")
with self._json_ctx:
assert sha1 in self._json_dict.keys(), "internal error: sha1 not found in json"
entry = _get_json_entry(self._json_dict[sha1])
assert entry[ENTRY_RF_KEY] > 0, f"ref count {entry[ENTRY_RF_KEY]} should be positive"
entry[ENTRY_RF_KEY] -= 1
if entry[ENTRY_RF_KEY] == 0:
                # The ref count is now 0, so delete the object.
path.unlink() # We may leave behind an empty dir, which is OK.
entry = {} # Below, we remove the entry because of this.
# Put the entry back and store it or delete it.
if entry:
self._json_dict[sha1] = entry
else:
# empty entry, it means this sha1 is deleted.
del self._json_dict[sha1]
def size_info(self, sha1: Optional[str] = None) -> Tuple[int, int, int]:
"""Return original, deduped, gzipped sizes for an entry or the store."""
with self._readonly_json_ctx:
if sha1:
if sha1 not in self._json_dict.keys():
raise ValueError(f"SHA1 {sha1} not found")
entry = self._json_dict[sha1]
return entry[ENTRY_OS_KEY], entry[ENTRY_DS_KEY], entry[ENTRY_CS_KEY]
return self._json_dict[STORE_OS_KEY], self._json_dict[STORE_DS_KEY], self._json_dict[STORE_CS_KEY]
    def names(self, sha1: Optional[str] = None) -> Dict[str, int]:
"""Return the names dict for an object."""
with self._readonly_json_ctx:
if sha1 not in self._json_dict.keys():
raise ValueError(f"SHA1 {sha1} not found")
entry = self._json_dict[sha1]
return entry[ENTRY_NAMES_KEY]
def _get_sha1_hash(self, file_path: Union[str, Path]) -> str:
"""Return the sha1 hash of a file
Args:
file_path (str, Path):
                Path to the file whose sha1 hash is to be calculated and returned.
Returns:
(str):
The SHA1 computed.
"""
sha1 = hashlib.sha1()
with open(file_path, "rb") as f:
while True:
data = f.read(self._sha1_buf_size)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def _get_tmp_file_path(self) -> Path:
"""Helper to get a tmp file name under self.tmp_dir."""
fd, name = tempfile.mkstemp(dir=self._tmp_dir)
os.close(fd) # Must close this FD or unlink() won't be able to release the space of the file.
return Path(name)
def _sha1_to_dir(self, sha1: str) -> Path:
"""Helper to get the internal dir for a file based on its SHA1"""
        # Using the first 2 hex characters of the sha1, which results in 16 * 16 = 256 subdirs under the
        # top level. Then, using another 2 hex characters for a sub-sub-dir. If each dir holds 1000 files,
        # this can hold about 65 million files.
# NOTE: this can NOT be changed for backward compatible reasons once in production.
assert len(sha1) > 4, "sha1 too short"
part1, part2 = sha1[:2], sha1[2:4]
return self._path.joinpath(part1, part2)
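    # For example (illustrative only), a sha1 such as "abcdef0123..." maps to the
    # directory <store_path>/ab/cd/ and the object later gets written to
    # <store_path>/ab/cd/abcdef0123...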
def _add_ref(self, current_sha1_hash: str, inc: bool, compressed: bool) -> int:
"""
Update the reference count.
        If the reference counting file does not have this sha1, then a new tracking
        entry is added.
Args:
current_sha1_hash (str):
The sha1 hash of the incoming added file.
inc (bool):
Increment or decrement.
Returns:
(int):
Resulting ref count.
"""
# Init the entry if needed.
if current_sha1_hash not in self._json_dict:
entry = {}
else:
entry = self._json_dict[current_sha1_hash]
entry = _get_json_entry(entry)
# Update the ref count.
entry[ENTRY_RF_KEY] += 1 if inc else -1
assert entry[ENTRY_RF_KEY] >= 0, "negative ref count"
# Update compressed flag.
entry[ENTRY_COMP_KEY] = compressed
self._json_dict[current_sha1_hash] = entry
return entry[ENTRY_RF_KEY]
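# A minimal round-trip sketch for this store (illustrative only; the store path
# is a made-up example):
#
#     store = SHA1_Store(Path("/tmp/example_sha1_store"), init=True)
#     sha1 = store.add(torch.zeros(4), compress=True, name="layer0.weight")
#     t = store.get(sha1)                           # tensor is loaded back into memory
#     orig, deduped, gzipped = store.size_info(sha1)
#     store.delete(sha1)                            # ref count drops to 0, object removed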
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
__all__: List[str] = []
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"Common cache root for torchvision.datasets and others."
DATASET_CACHE_ROOT = "cached_datasets"
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
""" Shared functions related to testing GPU memory sizes. """
import gc
from typing import Tuple
import torch
def find_tensor_by_shape(target_shape: Tuple, only_param: bool = True) -> bool:
"""Find a tensor from the heap
Args:
target_shape (tuple):
Tensor shape to locate.
only_param (bool):
Only match Parameter type (e.g. for weights).
Returns:
(bool):
Return True if found.
"""
for obj in gc.get_objects():
try:
# Only need to check parameter type objects if asked.
if only_param and "torch.nn.parameter.Parameter" not in str(type(obj)):
continue
if torch.is_tensor(obj) or (hasattr(obj, "data") and torch.is_tensor(obj.data)):
if obj.shape == target_shape:
return True
        except Exception:
pass
return False
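# A minimal usage sketch (illustrative only): after building a model, check that a
# parameter with a given shape is still reachable on the heap.
#
#     model = torch.nn.Linear(4, 2)
#     assert find_tensor_by_shape((2, 4))          # the weight parameter is found
#     assert not find_tensor_by_shape((123, 456))  # no such tensor exists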
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
__all__: List[str] = []
|