import tvm
from tvm import te
def check_throws(f):
try:
f()
except tvm.error.TVMError:
pass
else:
raise AssertionError("Should have raised an exception but didn't.")
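# Constant folding: when every operand of a TIR arithmetic op is an IntImm
# constant, the expression should fold eagerly to an IntImm whose value
# matches evaluating the same lambda on plain Python ints.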
def test_const_fold():
def check(f, *args):
x = f(*[tvm.tir.const(x, "int32") for x in args])
y = f(*args)
if not isinstance(x, (tvm.tir.IntImm,)) or x.value != int(y):
raise ValueError("check error: %s vs %s " % (x, y))
tmod = tvm.tir.truncmod
check(lambda x, y: x + y, 3, 4)
check(lambda x, y: x * y, 3, 12)
check(lambda x, y: x * y - 10, 3, 12)
check(lambda x, y: x - tmod(y, 10), 3, 12)
check(lambda x, y: x // y + 10, 100, 12)
check(lambda x, y: x & y + 10, 112, 128)
check(lambda x, y: x > y, 112, 128)
check(lambda x, y: x < y, 112, 128)
check(lambda x, y: x <= y, 112, 128)
check(lambda x, y: x >= y, 112, 128)
check(lambda x, y: (x | y) ^ 10, 112, 128)
def test_const_fold2():
x = te.var("x")
tmod = tvm.tir.truncmod
tdiv = tvm.tir.truncdiv
assert (x + 0).same_as(x)
assert (0 + x).same_as(x)
assert (x - 0).same_as(x)
assert tmod(x, 1).value == 0
assert (x * 1).same_as(x)
assert (1 * x).same_as(x)
assert isinstance(tdiv(1, x), tvm.tir.Div)
def test_const_fold3():
x = te.var("x")
for val in [0, 1]:
for func in [tvm.tir.all, tvm.tir.any]:
check_throws(lambda: func(tvm.tir.const(val, "uint1"), x))
check_throws(lambda: func(x, tvm.tir.const(val, "uint1")))
for tvm_func, py_func in [
(tvm.tir.all, lambda a, b: a and b),
(tvm.tir.any, lambda a, b: a or b),
]:
for v1 in [0, 1]:
for v2 in [0, 1]:
assert tvm.ir.structural_equal(
tvm_func(tvm.tir.const(v1, "uint1"), tvm.tir.const(v2, "uint1")),
tvm.tir.const(py_func(v1, v2), "uint1"),
)
x = te.var("x", "uint1")
true = tvm.tir.const(1, "uint1")
false = tvm.tir.const(0, "uint1")
assert tvm.tir.all(x, true).same_as(x)
assert tvm.tir.all(true, x).same_as(x)
assert tvm.tir.any(x, false).same_as(x)
assert tvm.tir.any(false, x).same_as(x)
assert tvm.tir.all(x, false).same_as(false)
assert tvm.tir.all(false, x).same_as(false)
assert tvm.tir.any(x, true).same_as(true)
assert tvm.tir.any(true, x).same_as(true)
def test_const_fold4():
x1 = tvm.tir.const(4, "int32")
x2 = x1 + 5
tdiv = tvm.tir.truncdiv
assert isinstance(x2, tvm.tir.IntImm) and x2.value == 9
x3 = tdiv(x2, 3)
assert isinstance(x3, tvm.tir.IntImm) and x3.value == 3
x4 = x3 + 0.55
assert isinstance(x4, tvm.tir.FloatImm) and abs(x4.value - 3.55) < 1e-6
x5 = te.ceil(x4)
assert isinstance(x5, tvm.tir.FloatImm) and x5.value == 4
x6 = x5.astype("int")
assert isinstance(x6, tvm.tir.IntImm) and x6.value == 4, "x6={}".format(x6)
y = (te.round((tvm.tir.const(6.5, "float32") - 1) / 1.5) + 2).astype("int")
assert isinstance(y, tvm.tir.IntImm) and y.value == 6
def test_binary_dtype_match():
def verify_general_dtype_support(f, is_conditional=False):
rules = [
[("bool", "int32"), "int32"],
[("int32", "float32"), "float32"],
[("int32", "int64"), "int64"],
[("uint32", "int8"), "uint32"],
[("uint32", "int32"), "uint32"],
]
for (lhs_dtype, rhs_dtype), out_dtype in rules:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
out = f(lhs, rhs)
if not is_conditional:
assert out.dtype == out_dtype
else:
assert out.dtype == "bool"
if hasattr(out, "a"):
assert out.a.dtype == out_dtype
assert out.b.dtype == out_dtype
elif hasattr(out, "args"):
assert out.args[0].dtype == out_dtype
assert out.args[1].dtype == out_dtype
else:
raise ValueError("Unknown binary op format!")
def verify_callop_float_only(f):
for lhs_dtype in ["int32", "float32", "float64"]:
for rhs_dtype in ["int32", "float32", "float64"]:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if "float" not in lhs_dtype and "float" not in rhs_dtype:
check_throws(lambda: f(lhs, rhs))
elif "float" in lhs_dtype:
out = f(lhs, rhs)
dtypes = [lhs_dtype, rhs_dtype]
if "float64" in dtypes:
target_dtype = "float64"
elif "float32" in dtypes:
target_dtype = "float32"
else:
target_dtype = "int32"
assert out.dtype == target_dtype
assert out.args[0].dtype == target_dtype
assert out.args[1].dtype == target_dtype
else:
out = f(lhs, rhs)
assert out.dtype == rhs_dtype
assert out.args[0].dtype == rhs_dtype
assert out.args[1].dtype == rhs_dtype
verify_general_dtype_support(lambda a, b: a + b)
verify_general_dtype_support(lambda a, b: a * b)
verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True)
verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True)
verify_callop_float_only(lambda a, b: te.power(a, b))
assert tvm.tir.const(1) == tvm.tir.const(True)
assert tvm.tir.const(2) != tvm.tir.const(True)
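# if_then_else with a Python-constant condition folds to the selected branch
# (cast to the unified dtype); a non-bool condition expression must raise.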
def test_if_then_else():
cases = [
[(te.var("cond", dtype="bool"), "bool", "int32"), "int32"],
[(True, "int32", "float32"), "float32"],
[(False, "int32", "int64"), "int64"],
[(te.var("cond", dtype="bool"), "uint32", "int32"), "uint32"],
[(te.var("cond", dtype="int32"), "uint32", "int32"), "uint32"],
]
for (cond, lhs_dtype, rhs_dtype), out_dtype in cases:
lhs = te.var("lhs", dtype=lhs_dtype)
rhs = te.var("rhs", dtype=rhs_dtype)
if cond is True or cond is False:
out = tvm.tir.if_then_else(cond, lhs, rhs)
out2 = tvm.tir.if_then_else(not cond, rhs, lhs)
out3 = tvm.tir.if_then_else(not cond, lhs, rhs)
assert tvm.ir.structural_equal(out, out2) == 1
if cond:
assert tvm.ir.structural_equal(out, lhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, rhs.astype(out_dtype)) == 1
else:
assert tvm.ir.structural_equal(out, rhs.astype(out_dtype)) == 1
assert tvm.ir.structural_equal(out3, lhs.astype(out_dtype)) == 1
elif cond.dtype == "bool":
out = tvm.tir.if_then_else(cond, lhs, rhs)
assert out.dtype == out_dtype
assert out.args[1].dtype == out_dtype
assert out.args[2].dtype == out_dtype
elif cond.dtype != "bool":
check_throws(lambda: tvm.tir.if_then_else(cond, lhs, rhs))
else:
raise ValueError("Unknown combinations")
if __name__ == "__main__":
test_const_fold()
test_const_fold2()
test_const_fold3()
test_const_fold4()
test_binary_dtype_match()
test_if_then_else()
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
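# Each cp.async below copies 16 bytes (8 float16 values) per instruction from
# global to shared memory; commit_group/wait_group fence the asynchronous
# copies before shared memory is read back into B.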
@T.prim_func
def ptx_cp_async(A: T.Buffer[(32, 128), "float16"], B: T.Buffer[(32, 128), "float16"]) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([32, 128], "float16", scope="shared")
T.reads(A[0:32, 0:128])
T.writes(B[0:32, 0:128])
for i in range(16):
T.evaluate(
T.ptx_cp_async(
A_shared.data, tx * 128 + 8 * i, A.data, tx * 128 + 8 * i, 16, dtype="float16"
)
)
T.evaluate(T.ptx_commit_group(dtype=""))
T.evaluate(T.ptx_wait_group(0, dtype=""))
for i in range(128):
B[tx, i] = A_shared[tx, i]
@tvm.testing.requires_cuda_compute_version(8)
def test_ptx_cp_async():
f = ptx_cp_async
mod = tvm.build(f, target="cuda")
A_np = np.random.rand(32, 128).astype("float16")
B_np = np.zeros((32, 128)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_np)
if __name__ == "__main__":
test_ptx_cp_async()
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
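# ldmatrix loads 8x8 float16 tiles from shared memory into per-thread
# registers: `num` selects how many 8x8 matrices to load (1, 2, or 4) and
# `trans` transposes each tile. The test checks only the loaded region.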
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer[(16, 16), "float16"], B: T.Buffer[(16, 16), "float16"], num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
import sys
import pytest
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
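# Each kernel below is a single-warp GEMM: every thread loads its fragment of
# A and B following the PTX mma fragment layout for the given shape and dtype,
# issues one T.ptx_mma, then scatters its accumulator registers back to C.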
@T.prim_func
def gemm_mma_m8n8k4_row_col_fp64pf64fp64(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 4], dtype="float64")
B = T.match_buffer(b, [8, 4], dtype="float64")
C = T.match_buffer(c, [8, 8], dtype="float64")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([1], "float64", scope="local")
MultiB = T.decl_buffer([1], "float64", scope="local")
Accum = T.decl_buffer([2], "float64", scope="local")
for i in range(2):
Accum[i] = T.float64(0)
MultiA[0] = A[(tx % 32) // 4, (tx % 32) % 4]
MultiB[0] = B[(tx % 32) // 4, (tx % 32) % 4]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"col",
"fp64",
"fp64",
"fp64",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float64",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m8n8k4_row_col_fp64pf64fp64():
sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_col_fp64pf64fp64)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [8, 4]).astype("float64")
B_np = np.random.uniform(-1, 1, [8, 4]).astype("float64")
C_np = np.zeros([8, 8]).astype("float64")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float64"), B_np.astype("float64").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k4_row_row_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 4], dtype="float16")
B = T.match_buffer(b, [4, 16], dtype="float16")
C = T.match_buffer(c, [16, 16], dtype="float16")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([8], "float16", scope="local")
for i in range(8):
Accum[i] = T.float32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
((tx % 32) % 4) + (4 * ((((tx % 32)
mma_multi_a_col,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) % 4,
mma_multi_b_col + (4 * ((tx % 32)
]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"row",
"fp16",
"fp16",
"fp16",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float16",
)
)
for mma_accum_c_id in range(8):
C[
((tx % 32) % 4) + (4 * ((((tx % 32)
mma_accum_c_id % 4 + (4 * ((tx % 32) % 16
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(7)
def test_gemm_mma_m8n8k4_row_row_fp16fp16fp16():
sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp16)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16")
B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16")
C_np = np.zeros([16, 16]).astype("float16")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float16"), B_np.astype("float16"))
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k4_row_row_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 4], dtype="float16")
B = T.match_buffer(b, [4, 16], dtype="float16")
C = T.match_buffer(c, [16, 16], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([8], "float32", scope="local")
for i in range(8):
Accum[i] = T.float32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
((tx % 32) % 4) + (4 * ((((tx % 32)
mma_multi_a_col,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) % 4,
mma_multi_b_col + (4 * ((tx % 32)
]
T.evaluate(
T.ptx_mma(
"m8n8k4",
"row",
"row",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(8):
C[
((tx % 32) % 2)
+ ((mma_accum_c_id
+ 4 * ((tx % 32)
+ ((tx % 32) % 16
(tx % 32) % 4
+ (tx % 32) % 16
+ mma_accum_c_id % 2
+ mma_accum_c_id
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(7)
def test_gemm_mma_m8n8k4_row_row_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16")
B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16")
C_np = np.zeros([16, 16]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32"))
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="int8")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "int8", scope="local")
MultiB = T.decl_buffer([4], "int8", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_a_col]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col]
T.evaluate(
T.ptx_mma(
"m8n8k16",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k16_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
C_np = np.zeros([8, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="uint8")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "int8", scope="local")
MultiB = T.decl_buffer([4], "uint8", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_a_col]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col]
T.evaluate(
T.ptx_mma(
"m8n8k16",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k16_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8")
C_np = np.zeros([8, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m8n8k32_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 32], dtype="int4")
B = T.match_buffer(b, [8, 32], dtype="int4")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int4", scope="local")
MultiB = T.decl_buffer([8], "int4", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(8):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_a_col]
for mma_multi_b_col in T.vectorized(8):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_b_col]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"int4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k32_row_col_s4s4s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4s4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([8, 32], "int4", ctx)
B_tvm = tvm.nd.empty([8, 32], "int4", ctx)
C_tvm = tvm.nd.empty([8, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
@T.prim_func
def gemm_mma_m8n8k32_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [8, 32], dtype="int4")
B = T.match_buffer(b, [8, 32], dtype="uint4")
C = T.match_buffer(c, [8, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int4", scope="local")
MultiB = T.decl_buffer([8], "uint4", scope="local")
Accum = T.decl_buffer([2], "int32", scope="local")
for i in range(2):
Accum[i] = T.int32(0)
for mma_multi_a_col in T.vectorized(8):
MultiA[mma_multi_a_col] = A[(tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_a_col]
for mma_multi_b_col in T.vectorized(8):
MultiB[mma_multi_b_col] = B[(tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_b_col]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(2):
C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id]
@tvm.testing.requires_nvcc_version(11)
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_gemm_mma_m8n8k32_row_col_s4u4s32():
sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4u4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([8, 32], "int4", ctx)
B_tvm = tvm.nd.empty([8, 32], "uint4", ctx)
C_tvm = tvm.nd.empty([8, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
@T.prim_func
def gemm_mma_m16n8k8_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [8, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([4], "float16", scope="local")
MultiB = T.decl_buffer([2], "float16", scope="local")
Accum = T.decl_buffer([4], "float32", scope="local")
for i in range(4):
Accum[i] = T.float32(0)
for mma_multi_a_col in T.vectorized(4):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 2 * 8,
(tx % 32) % 4 * 2 + mma_multi_a_col % 2,
]
for mma_multi_b_col in T.vectorized(2):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 2 + mma_multi_b_col,
]
T.evaluate(
T.ptx_mma(
"m16n8k8",
"row",
"col",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k8_row_col_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k8_row_col_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 8]).astype("float16")
C_np = np.zeros([16, 8]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [8, 16], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([4], "float16", scope="local")
for i in range(4):
Accum[i] = T.float32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8,
(tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp16",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float16",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_fp16fp16fp16():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp16)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16")
C_np = np.zeros([16, 8]).astype("float16")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float16"), B_np.astype("float16").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [8, 16], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "float16", scope="local")
MultiB = T.decl_buffer([4], "float16", scope="local")
Accum = T.decl_buffer([4], "float32", scope="local")
for i in range(4):
Accum[i] = T.float32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8,
(tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="float32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_fp16fp16fp32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16")
C_np = np.zeros([16, 8]).astype("float32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="int8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int8", scope="local")
MultiB = T.decl_buffer([4], "int8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("int8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="int8")
B = T.match_buffer(b, [8, 16], dtype="uint8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([8], "int8", scope="local")
MultiB = T.decl_buffer([4], "uint8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(8):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4,
]
for mma_multi_b_col in T.vectorized(4):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col,
]
T.evaluate(
T.ptx_mma(
"m16n8k16",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k16_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k32_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 32], dtype="int8")
B = T.match_buffer(b, [8, 32], dtype="int8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([16], "int8", scope="local")
MultiB = T.decl_buffer([8], "int8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(16):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16,
]
for mma_multi_b_col in range(8):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16,
]
T.evaluate(
T.ptx_mma(
"m16n8k32",
"row",
"col",
"int8",
"int8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k32_row_col_s8s8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8s8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 32]).astype("int8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k32_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 32], dtype="int8")
B = T.match_buffer(b, [8, 32], dtype="uint8")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([16], "int8", scope="local")
MultiB = T.decl_buffer([8], "uint8", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(16):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8,
(tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16,
]
for mma_multi_b_col in range(8):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16,
]
T.evaluate(
T.ptx_mma(
"m16n8k32",
"row",
"col",
"int8",
"uint8",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k32_row_col_s8u8s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8u8s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8")
B_np = np.random.uniform(-10, 10, [8, 32]).astype("uint8")
C_np = np.zeros([16, 8]).astype("int32")
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T)
C_numpy = C_tvm.numpy()
tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3)
@T.prim_func
def gemm_mma_m16n8k64_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 64], dtype="int4")
B = T.match_buffer(b, [8, 64], dtype="int4")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([32], "int4", scope="local")
MultiB = T.decl_buffer([16], "int4", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(32):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8,
(tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32,
]
for mma_multi_b_col in range(16):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32,
]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"int4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k64_row_col_s4s4s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4s4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 64], "int4", ctx)
B_tvm = tvm.nd.empty([8, 64], "int4", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
@T.prim_func
def gemm_mma_m16n8k64_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 64], dtype="int4")
B = T.match_buffer(b, [8, 64], dtype="uint4")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([32], "int4", scope="local")
MultiB = T.decl_buffer([16], "uint4", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(32):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8,
(tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32,
]
for mma_multi_b_col in range(16):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32,
]
T.evaluate(
T.ptx_mma(
"m8n8k32",
"row",
"col",
"int4",
"uint4",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k64_row_col_s4u4s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4u4s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 64], "int4", ctx)
B_tvm = tvm.nd.empty([8, 64], "uint4", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
@T.prim_func
def gemm_mma_m16n8k256_row_col_b1b1s32(a: T.handle, b: T.handle, c: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 256], dtype="int1")
B = T.match_buffer(b, [8, 256], dtype="int1")
C = T.match_buffer(c, [16, 8], dtype="int32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
MultiA = T.decl_buffer([128], "int1", scope="local")
MultiB = T.decl_buffer([64], "int1", scope="local")
Accum = T.decl_buffer([4], "int32", scope="local")
for i in range(4):
Accum[i] = T.int32(0)
for mma_multi_a_col in range(128):
MultiA[mma_multi_a_col] = A[
(tx % 32) // 4 + mma_multi_a_col % 64 // 32 * 8,
(tx % 32) % 4 * 32 + mma_multi_a_col % 32 + mma_multi_a_col // 64 * 128,
]
for mma_multi_b_col in range(64):
MultiB[mma_multi_b_col] = B[
(tx % 32) // 4,
(tx % 32) % 4 * 32 + mma_multi_b_col % 32 + mma_multi_b_col // 32 * 128,
]
T.evaluate(
T.ptx_mma(
"m16n8k256",
"row",
"col",
"int1",
"int1",
"int32",
MultiA.data,
0,
MultiB.data,
0,
Accum.data,
0,
False,
"xor",
dtype="int32",
)
)
for mma_accum_c_id in range(4):
C[
(tx % 32) // 4 + mma_accum_c_id // 2 * 8,
(tx % 32) % 4 * 2 + mma_accum_c_id % 2,
] = Accum[mma_accum_c_id]
@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k256_row_col_b1b1s32():
sch = tvm.tir.Schedule(gemm_mma_m16n8k256_row_col_b1b1s32)
cuda_mod = tvm.build(sch.mod, target="cuda")
ctx = tvm.cuda()
A_tvm = tvm.nd.empty([16, 256], "int1", ctx)
B_tvm = tvm.nd.empty([8, 256], "int1", ctx)
C_tvm = tvm.nd.empty([16, 8], "int32", ctx)
cuda_mod(A_tvm, B_tvm, C_tvm)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
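# Sparse (2:4 structured sparsity) MMA tests: A is stored compressed to half
# its logical columns, and per-thread uint32 metadata records which 2 of every
# 4 logical columns are kept.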
def gen_2in4_mask(m: int, n: int):
assert n % 4 == 0
return np.array(
[[np.sort(np.random.choice(4, 2, replace=False)) for _ in range(n // 4)] for _ in range(m)]
).astype("uint8")
def get_dense_mat_by_mask(val, mask):
m, n_chunks, _ = mask.shape
val = val.reshape(m, n_chunks, 2)
ret = np.zeros((m, n_chunks, 4)).astype(val.dtype)
for i in range(m):
for j in range(n_chunks):
for k in range(2):
ret[i, j, mask[i, j, k]] = val[i, j, k]
return ret.reshape(m, n_chunks * 4)
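# Example: mask[i, j] == [0, 3] means the two values stored for the j-th group
# of 4 columns of row i expand into dense columns 0 and 3 of that group.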
@T.prim_func
def mma_sp_m16n8k16_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [16, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
metadata = T.match_buffer(_metadata, [8], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([4], "float16", scope="local")
multi_b = T.decl_buffer([4], "float16", scope="local")
accum = T.decl_buffer([4], "float16", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(4):
multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
for i in range(4):
multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
meta_local[0] = metadata[tx // 4]
T.evaluate(
T.ptx_mma_sp(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp16",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float16",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k16_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 8], dtype="float16")
B = T.match_buffer(b, [16, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
metadata = T.match_buffer(_metadata, [8], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([4], "float16", scope="local")
multi_b = T.decl_buffer([4], "float16", scope="local")
accum = T.decl_buffer([4], "float32", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(4):
multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
for i in range(4):
multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
meta_local[0] = metadata[tx // 4]
T.evaluate(
T.ptx_mma_sp(
"m16n8k16",
"row",
"col",
"fp16",
"fp16",
"fp32",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float32",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k32_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [32, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float16")
metadata = T.match_buffer(_metadata, [16], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([8], "float16", scope="local")
multi_b = T.decl_buffer([8], "float16", scope="local")
accum = T.decl_buffer([4], "float16", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(8):
multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2 + i // 4 * 8]
for i in range(8):
multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
meta_local[0] = metadata[tx
T.evaluate(
T.ptx_mma_sp(
"m16n8k32",
"row",
"col",
"fp16",
"fp16",
"fp16",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float16",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@T.prim_func
def mma_sp_m16n8k32_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
A = T.match_buffer(a, [16, 16], dtype="float16")
B = T.match_buffer(b, [32, 8], dtype="float16")
C = T.match_buffer(c, [16, 8], dtype="float32")
metadata = T.match_buffer(_metadata, [16], dtype="uint32")
brow = T.env_thread("blockIdx.y")
bcol = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(brow, 1)
T.launch_thread(bcol, 1)
T.launch_thread(tx, 32)
multi_a = T.decl_buffer([8], "float16", scope="local")
multi_b = T.decl_buffer([8], "float16", scope="local")
accum = T.decl_buffer([4], "float32", scope="local")
meta_local = T.decl_buffer([1], "uint32", scope="local")
for i in range(4):
accum[i] = T.float16(0)
for i in range(8):
multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2 + i // 4 * 8]
for i in range(8):
multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
meta_local[0] = metadata[tx
T.evaluate(
T.ptx_mma_sp(
"m16n8k32",
"row",
"col",
"fp1 |
6",
"fp16",
"fp32",
multi_a.data,
0,
multi_b.data,
0,
accum.data,
0,
meta_local.data,
0,
0,
False,
dtype="float32",
)
)
for i in range(4):
C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k16_f16():
def get_meta_m16n8k16_half(mask):
assert mask.shape == (16, 4, 2)
mask = mask.reshape(16, 8)
ret = np.zeros((8,)).astype("uint32")
for i in range(8):
base = 1
for blk in range(2):
for j in range(8):
ret[i] |= int(mask[blk * 8 + i, j]) * base
base = base << 2
return ret
for out_dtype in ["float16", "float32"]:
func = mma_sp_m16n8k16_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k16_f16f16f32
sch = tvm.tir.Schedule(func)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
B_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
mask = gen_2in4_mask(16, 16)
A_dense_np = get_dense_mat_by_mask(A_np, mask)
C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
meta = get_meta_m16n8k16_half(mask)
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
meta_tvm = tvm.nd.array(meta, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k32_f16():
def get_meta_m16n8k32_half(mask):
assert mask.shape == (16, 8, 2)
mask = mask.reshape(16, 2, 8)
ret = np.zeros((8, 2)).astype("uint32")
for i in range(8):
for k in range(2):
base = 1
for blk in range(2):
for j in range(8):
ret[i, k] |= int(mask[blk * 8 + i, k, j]) * base
base = base << 2
return ret.reshape(16)
for out_dtype in ["float16", "float32"]:
func = mma_sp_m16n8k32_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k32_f16f16f32
sch = tvm.tir.Schedule(func)
cuda_mod = tvm.build(sch.mod, target="cuda")
A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
B_np = np.random.uniform(-1, 1, [32, 8]).astype("float16")
mask = gen_2in4_mask(16, 32)
A_dense_np = get_dense_mat_by_mask(A_np, mask)
C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
meta = get_meta_m16n8k32_half(mask)
ctx = tvm.cuda()
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
meta_tvm = tvm.nd.array(meta, ctx)
cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
test_mma_sp_m16n8k16_f16()
test_mma_sp_m16n8k32_f16()
import pytest
import sys
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.tir.buffer import Buffer
from tvm.tir.function import PrimFunc
from tvm.tir.stmt import Block
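# renew_defs re-creates every definition (function params, buffers, loop and
# block iter vars) in a PrimFunc: the result must be structurally equal to the
# original while sharing none of the definition objects, which is what the
# _check_* helpers below assert.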
def _check_func_signature_remap(lhs: PrimFunc, rhs: PrimFunc):
assert lhs != rhs
for x, y in zip(lhs.params, rhs.params):
assert x != y
assert lhs.buffer_map[x] != rhs.buffer_map[y]
def _check_buffer_decl(lhs: Buffer, rhs: Buffer):
assert lhs != rhs
assert lhs.data != rhs.data
def _check_block_signature_remap(lhs: Block, rhs: Block):
assert lhs != rhs
for x, y in zip(lhs.iter_vars, rhs.iter_vars):
assert x != y
assert x.var != y.var
for x, y in zip(lhs.alloc_buffers, rhs.alloc_buffers):
_check_buffer_decl(x, y)
for x, y in zip(lhs.match_buffers, rhs.match_buffers):
assert x != y
_check_buffer_decl(x.buffer, y.buffer)
def test_simple():
@T.prim_func
def elementwise(A: T.Buffer[(128, 128), "float32"]):
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * 2.0
f1 = elementwise
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
_check_func_signature_remap(f1, f2)
_check_block_signature_remap(f1.body.block, f2.body.block)
assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
assert f1.body.block.body.body.loop_var != f2.body.block.body.body.loop_var
def _get_block(f):
return f.body.block.body.body.body.block
_check_block_signature_remap(_get_block(f1), _get_block(f2))
def test_match_buffer():
@T.prim_func
def func_match_buffer(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]):
with T.block("root"):
s = T.var("int32")
e = T.var("int32")
A0 = T.match_buffer(
A[0:128, 0:128],
shape=(128, 128),
dtype="float32",
strides=[s, s],
elem_offset=e,
)
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A0[vi, vj] * 2.0
f1 = func_match_buffer
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
_check_func_signature_remap(f1, f2)
_check_block_signature_remap(f1.body.block, f2.body.block)
assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
def _get_block(f):
return f.body.block
block1 = _get_block(f1)
block2 = _get_block(f2)
_check_block_signature_remap(block1, block2)
matched_buffer1 = block1.match_buffers[0].buffer
matched_buffer2 = block2.match_buffers[0].buffer
assert matched_buffer1.strides[0] != matched_buffer2.strides[0]
assert matched_buffer1.strides[1] != matched_buffer2.strides[1]
assert matched_buffer1.strides[0] == matched_buffer1.strides[1]
assert matched_buffer2.strides[0] == matched_buffer2.strides[1]
assert matched_buffer1.elem_offset != matched_buffer2.elem_offset
def test_undefined_buffer():
@T.prim_func
def access_alloc():
A_data = T.allocate([128], "float16", "global")
A = T.buffer_decl(shape=[128], dtype="float16", data=A_data)
T.evaluate(A.data)
for i in range(128):
A[i] = A[i] + T.float16(1.0)
f1 = access_alloc
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
assert f1.body.buffer_var != f2.body.buffer_var
def _get_buffer_store_buffer(f):
return f.body.body[1].body.buffer
_check_buffer_decl(_get_buffer_store_buffer(f1), _get_buffer_store_buffer(f2))
def test_symbolic_func():
@T.prim_func
def symbolic_func(a: T.handle, b: T.handle, n: T.int32):
m = T.var("int32")
A = T.match_buffer(a, (n, m))
B = T.match_buffer(b, (n, m * 2))
for i, j in T.grid(n, m):
B[i, j * 2] = A[i, j]
B[i, j * 2 + 1] = A[i, j]
f1 = symbolic_func
f2 = tvm.tir.stmt_functor.renew_defs(f1)
tvm.ir.assert_structural_equal(f1, f2)
if __name__ == "__main__":
tvm.testing.main()
from typing import List
import pytest
import tvm
import tvm.testing
from tvm.tir.function import TensorIntrin
from tvm.tir.tensor_intrin.x86 import dot_product_16x4_u8i8i32_desc
from tvm.tir.tensor_intrin.cuda import (
WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
)
from tvm.tir import Evaluate, For, ForKind, IndexMap, Var, decl_buffer, floordiv, floormod, Schedule
from tvm.tir.analysis import expr_deep_equal
from tvm.tir.schedule.analysis import (
get_auto_tensorize_mapping_info,
suggest_index_map,
get_tensorize_loop_mapping,
TensorizeInfo,
)
from tvm.script import tir as T
from tvm.tir.stmt_functor import pre_order_visit
from tvm.meta_schedule.testing import te_workload
from tvm.te import create_prim_func
def _make_vars(*args: str) -> List[Var]:
return [Var(arg, dtype="int32") for arg in args]
def _make_loops(loop_vars: List[Var], extents: List[int]) -> List[For]:
assert len(loop_vars) == len(extents)
return [
For(
loop_var=loop_var,
min_val=0,
extent=extent,
kind=ForKind.SERIAL,
body=Evaluate(0),
)
for loop_var, extent in zip(loop_vars, extents)
]
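# suggest_index_map infers a buffer layout rewrite from how the buffer is
# indexed inside the given loops, e.g. splitting a fused axis back into the
# (outer, inner) pair implied by the floordiv/floormod access pattern.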
def test_suggest_index_map_simple():
i, j = _make_vars("i", "j")
index_map = suggest_index_map(
buffer=decl_buffer(shape=[8, 256]),
indices=[
floordiv(i, 16) * 4 + floordiv(j, 16),
floormod(i, 16) * 16 + floormod(j, 16),
],
loops=_make_loops(
loop_vars=[i, j],
extents=[32, 64],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda x, y: [
floordiv(x, 4),
floordiv(y, 16),
floormod(x, 4),
floormod(y, 16),
],
)
assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_bijective():
i, j = _make_vars("i", "j")
index_map = suggest_index_map(
buffer=decl_buffer(shape=[8]),
indices=[floormod(j, 4) * 2 + i],
loops=_make_loops(
loop_vars=[i, j],
extents=[2, 32],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda x: [
floormod(x, 2),
floordiv(x, 2),
],
)
assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_winograd():
"""use case in winograd conv where the indices are complicated"""
fused_outer, i3_3_fused, i4_0, i4_1 = _make_vars("fused_outer", "i3_3_fused", "i4_0", "i4_1")
eps = floordiv(fused_outer, 336) * 2 + floordiv(floormod(fused_outer, 16), 8)
nu = floordiv(floormod(fused_outer, 336), 112) * 2 + floordiv(floormod(fused_outer, 8), 4)
co = floormod(fused_outer, 4) * 32 + i3_3_fused
ci = (i4_0 * 32) + i4_1
buffer = decl_buffer(shape=[6, 6, 128, 128])
index_map = suggest_index_map(
buffer=buffer,
indices=[eps, nu, co, ci],
loops=_make_loops(
loop_vars=[fused_outer, i3_3_fused, i4_0, i4_1],
extents=[1008, 32, 4, 32],
),
predicate=True,
)
expected_index_map = IndexMap.from_func(
lambda i0, i1, i2, i3: (
floordiv(i0, 2),
floordiv(i1, 2),
floormod(i0, 2),
floormod(((i1 * 4) + floordiv(i2, 32)), 8),
floormod(i2, 32),
floordiv(i3, 32),
floormod(i3, 32),
)
)
assert index_map.is_equivalent_to(expected_index_map)
inverse_index_map = index_map.inverse(buffer.shape)
expected_inverse_index_map = IndexMap.from_func(
lambda i0, i1, i2, i3, i4, i5, i6: (
((i0 * 2) + i2),
((i1 * 2) + floordiv(((i3 * 32) + i4), 128)),
floormod(((i3 * 32) + i4), 128),
((i5 * 32) + i6),
)
)
assert inverse_index_map.is_equivalent_to(expected_inverse_index_map)
@tvm.script.ir_module
class DenseVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
for i0, i1, i2 in T.grid(1024, 1024, 1024):
with T.block("compute"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
with T.block("conv2d_NCHWc_int8"):
(
n,
oc_chunk,
oh,
ow,
oc_block,
kh,
kw,
ic_outer,
ic_f_inner,
ic_s_inner,
) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
def collect_loops(prim_func):
loops = []
def callback(node):
if isinstance(node, tvm.tir.For):
loops.append(node)
return True
pre_order_visit(prim_func.body, callback)
return loops
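# get_tensorize_loop_mapping matches the innermost loops of a block against
# the loop nest of a tensor-intrin description and reports which schedule
# loop corresponds to each description loop.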
def test_get_tensorize_loop_mapping_dense_vnni():
s = Schedule(DenseVNNIModule)
block = s.get_block("compute")
info = get_tensorize_loop_mapping(s, block, dot_product_16x4_u8i8i32_desc)
assert isinstance(info, TensorizeInfo)
desc_loop_to_sref = dict((v, k) for k, v in info.loop_map.items())
desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
_, loop_j, loop_k = s.get_loops(block)
assert desc_loops[0] in desc_loop_to_sref and desc_loops[1] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(loop_j)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(loop_k)
def test_get_tensorize_loop_mapping_conv2d_nchwc_vnni():
s = Schedule(Conv2dNCHWcVNNIModule)
block = s.get_block("conv2d_NCHWc_int8")
info = get_tensorize_loop_mapping(s, block, dot_product_16x4_u8i8i32_desc)
desc_loop_to_sref = dict((v, k) for k, v in info.loop_map.items())
desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
_, _, _, _, i4, _, _, _, _, i9 = s.get_loops(block)
assert desc_loops[0] in desc_loop_to_sref and desc_loops[1] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i4)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i9)
def test_get_tensorize_loop_mapping_matmul_mma():
@T.prim_func
def matmul_16x16x16xf16f16f16_desc(
A: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
B: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
C: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
) -> None:
with T.block("root"):
T.reads(C[0:16, 0:16], A[0:16, 0:16], B[0:16, 0:16])
T.writes(C[0:16, 0:16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
matmul = create_prim_func(
te_workload.matmul_relu(
n=512,
m=512,
k=512,
)
)
s = Schedule(matmul)
block = s.get_block("C")
i0, i1, i2 = s.get_loops(block)
desc_loops = collect_loops(matmul_16x16x16xf16f16f16_desc)
for do_reorder in [False, True]:
if do_reorder:
s.reorder(i2, i0, i1)
info = get_tensorize_loop_mapping(s, block, matmul_16x16x16xf16f16f16_desc)
assert info is not None
desc_loop_to_sref = dict((v, k) for k, v in info.loop_map.items())
for i in range(3):
assert desc_loops[i] in desc_loop_to_sref
assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i0)
assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i1)
assert s.get(desc_loop_to_sref[desc_loops[2]]) == s.get(i2)
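# With allow_padding=True, a 127x256x65 matmul can still match the 16x16x16
# WMMA descriptor; block iters are padded by [1, 0, 15] (127->128, 65->80).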
def test_get_tensorize_loop_mapping_padding_matmul():
matmul = create_prim_func(
te_workload.matmul_relu(
n=127,
m=256,
k=65,
in_dtype="float16",
out_dtype="float16",
)
)
s = Schedule(matmul)
block = s.get_block("C")
desc = TensorIntrin.get(WMMA_SYNC_16x16x16_f16f16f16_INTRIN).desc
info = get_tensorize_loop_mapping(s, block, desc, allow_padding=True)
assert info is not None
expected_padding = [1, 0, 15]
actual_padding = info.block_iter_paddings
assert actual_padding is not None
assert len(actual_padding) == len(expected_padding)
for actual, expected in zip(actual_padding, expected_padding):
assert actual == expected
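# Check that auto-tensorization proposes exactly one index map for the block
# and that it is equivalent to expected_index_map (None = no match expected).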
def check_index_map(workload, block_name, intrin_name, expected_index_map):
s = Schedule(workload)
block = s.get_block(block_name)
desc_func = TensorIntrin.get(intrin_name).desc
info = get_auto_tensorize_mapping_info(s, block, desc_func)
if expected_index_map is None:
assert info is None
return
assert len(info.mappings) == 1
assert IndexMap.from_func(expected_index_map).is_equivalent_to(info.mappings[0])
def test_get_auto_tensorize_mapping_info_conv2d():
conv2d = create_prim_func( |
te_workload.conv2d_nhwc(4, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
)
check_index_map(
conv2d,
"conv2d_nhwc",
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
)
def test_get_auto_tensorize_mapping_info_conv2d_unit_batch():
conv2d = create_prim_func(
te_workload.conv2d_nhwc(1, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
)
check_index_map(
conv2d,
"conv2d_nhwc",
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
)
@pytest.mark.parametrize("b,m,n,k", [(1, 512, 512, 512), (16, 32, 32, 32)])
def test_get_auto_tensorize_mapping_info_batch_matmul(b, m, n, k):
matmul = create_prim_func(
te_workload.batch_matmul_nkkm(b, m, n, k, in_dtype="float16", out_dtype="float32")
)
check_index_map(
matmul, "Z", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, lambda b, m, n, k: (b, m, n, k)
)
@pytest.mark.parametrize(
"n,m,k,expected",
[
(
512,
512,
512,
lambda n, m, k: (
n,
m,
k,
),
),
(1, 32, 32, lambda n, m, k: (n, m, k)),
],
)
def test_get_auto_tensorize_mapping_info_matmul(n, m, k, expected):
matmul = create_prim_func(te_workload.matmul(n, m, k, in_dtype="float16", out_dtype="float32"))
check_index_map(matmul, "C", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, expected)
if __name__ == "__main__":
    tvm.testing.main()
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule import DepKind
from tvm.tir.stmt_functor import post_order_visit
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def war_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
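# Walk the PrimFunc with post_order_visit and return the StmtSRef of the
# block whose name_hint matches.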
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
result = None
def f_visit(node):
nonlocal result
if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
result = node
func = s.mod["main"]
post_order_visit(func.body, f_visit)
assert result is not None and isinstance(result, tvm.tir.Block)
return s.get_sref(result)
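# B writes a buffer that C later reads, so the root scope holds a single
# read-after-write (RAW) edge from B to C.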
def test_elementwise_dependency():
s = tir.ScheduleState(elementwise, debug_mask="all")
    root = _get_block(s, "root")
block_b = _get_block(s, "B")
block_c = _get_block(s, "C")
(dep,) = s.get_block_scope(root).get_deps_by_src(block_b)
assert dep.src.same_as(block_b)
assert dep.dst.same_as(block_c)
assert dep.kind == DepKind.RAW
(dep,) = s.get_block_scope(root).get_deps_by_dst(block_c)
assert dep.src.same_as(block_b)
assert dep.dst.same_as(block_c)
assert dep.kind == DepKind.RAW
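# "init" writes C and "update" both reads and writes C, so init -> update
# carries both a RAW and a WAW edge (in unspecified order).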
def test_matmul_dependency():
s = tir.ScheduleState(matmul, debug_mask="all")
root = _get_block(s, "root")
init = _get_block(s, "init")
update = _get_block(s, "update")
p0, p1 = s.get_block_scope(root).get_deps_by_src(init)
assert p0.src.same_as(init)
assert p0.dst.same_as(update)
assert p1.src.same_as(init)
assert p1.dst.same_as(update)
assert (p0.kind == DepKind.RAW and p1.kind == DepKind.WAW) or (
p0.kind == DepKind.WAW and p1.kind == DepKind.RAW
)
p0, p1 = s.get_block_scope(root).get_deps_by_dst(update)
assert p0.src.same_as(init)
assert p0.dst.same_as(update)
assert p1.src.same_as(init)
assert p1.dst.same_as(update)
assert (p0.kind == DepKind.RAW and p1.kind == DepKind.WAW) or (
p0.kind == DepKind.WAW and p1.kind == DepKind.RAW
)
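# Within one iteration C reads B before B is written, producing a
# write-after-read (WAR) edge from C to B.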
def test_war_dependency():
s = tir.ScheduleState(war_dependency, debug_mask="all")
root = _get_block(s, "root")
block_c = _get_block(s, "C")
block_b = _get_block(s, "B")
(dep,) = s.get_block_scope(root).get_deps_by_src(block_c)
assert dep.src.same_as(block_c)
assert dep.dst.same_as(block_b)
assert dep.kind == DepKind.WAR
(dep,) = s.get_block_scope(root).get_deps_by_dst(block_b)
assert dep.src.same_as(block_c)
assert dep.dst.same_as(block_b)
assert dep.kind == DepKind.WAR
if __name__ == "__main__":
    tvm.testing.main()
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def single_elementwise(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]):
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
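# Blockizing at the outer loop wraps the whole 128x128 iteration space in a
# new outer block whose axes are trivial (extent-1) spatial iters.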
def test_blockize_outer():
@T.prim_func
def after_blockize_outer(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
) -> None:
with T.block("blockized_B"):
vio = T.axis.spatial(1, 0)
vjo = T.axis.spatial(1, 0)
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
func = single_elementwise
s = tir.Schedule(func, debug_mask="all")
x, _ = s.get_loops(s.get_block("B"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_outer)
verify_trace_roundtrip(sch=s, mod=func)
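# Blockizing at the inner loop keeps i as a plain loop and introduces a
# unit vjo axis for the blockized j dimension.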
def test_blockize_inner():
@T.prim_func
def after_blockize_inner(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
) -> None:
for i in T.serial(128):
with T.block("blockized_B"):
vi = T.axis.spatial(128, i)
vjo = T.axis.spatial(1, 0)
for j in T.serial(128):
with T.block("B"):
vj = T.axis.remap("S", [j])
B[vi, vj] = A[vi, vj] * 2.0
func = single_elementwise
s = tir.Schedule(func, debug_mask="all")
_, y = s.get_loops(s.get_block("B"))
s.blockize(y)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_inner)
verify_trace_roundtrip(sch=s, mod=func)
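# Blockize the inner 16x16 loop nest of "C" (the shape left by
# reverse_compute_at), turning it into a C_o outer block that mirrors "B_o".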
def test_two_elementwise_blockize_reverse_compute_at():
@T.prim_func
def before_blockize_rca(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="f |
loat32")
for i, j in T.grid(8, 8):
with T.block("B_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i])
B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi = T.axis.spatial(128, i * 16 + ax0)
vj = T.axis.spatial(128, j * 16 + ax1)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def after_blockize_rca(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(8, 8):
with T.block("B_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i])
B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0
with T.block("C_o"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
                T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [ax0, ax1])
T.reads(B[vi * 16 + vi_i, vj * 16 + vj_i])
T.writes(C[vi * 16 + vi_i, vj * 16 + vj_i])
C[vi * 16 + vi_i, vj * 16 + vj_i] = B[vi * 16 + vi_i, vj * 16 + vj_i] + 1.0
func = before_blockize_rca
s = tir.Schedule(func, debug_mask="all")
_, _, x, _ = s.get_loops(s.get_block("C"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_rca)
verify_trace_roundtrip(sch=s, mod=func)
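# Symmetric case: blockize the inner 16x16 loop nest of "B" (the shape left
# by compute_at), so both producer and consumer become 16x16 outer blocks.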
def test_two_elementwise_blockize_compute_at():
@T.prim_func
def before_blockize_compute_at(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0 in T.grid(8, 8):
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + ax0)
vj = T.axis.spatial(128, j_0 * 16 + ax1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * 2.0
with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
                        B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0
)
@T.prim_func
def after_blockize_compute_at(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0 in T.grid(8, 8):
with T.block("B_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(A[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [ax0, ax1])
T.reads(A[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
A[vi_o * 16 + vi_i, vj_o * 16 + vj_i] * 2.0
)
with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi_i, vj_i = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i])
C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = (
B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0
)
func = before_blockize_compute_at
s = tir.Schedule(func, debug_mask="all")
_, _, x, _ = s.get_loops(s.get_block("B"))
s.blockize(x)
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_compute_at)
verify_trace_roundtrip(sch=s, mod=func)
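# Blockizing a reduction loop must hoist the T.init() out of the inner body
# into a dedicated "B_init" block inside the new outer block.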
def test_blockize_init_loops():
@T.prim_func
    def rowsum(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128,), "float32"]) -> None:
for k, i in T.grid(128, 128):
with T.block("B"):
vk, vi = T.axis.remap("RS", [k, i])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def after_rowsum_blockize(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128,), "float32"],
) -> None:
with T.block("blockized_B"):
vko = T.axis.R(1, 0)
vio = T.axis.S(1, 0)
with T.init():
for i1 in T.serial(0, 128):
with T.block("B_init"):
vi_init = T.axis.S(128, i1)
B[vi_init] = T.float32(0)
for i0, i1_1 in T.grid(128, 128):
with T.block("B"):
vk, vi = T.axis.remap("RS", [i0, i1_1])
B[vi] = B[vi] + A[vi, vk]
s = tir.Schedule(rowsum, debug_mask="all")
k, _ = s.get_loops(s.get_block("B"))
s.blockize(k)
tvm.ir.assert_structural_equal(s.mod["main"], after_rowsum_blockize)
verify_trace_roundtrip(sch=s, mod=rowsum)
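# Blockize on a loop nest with int64 extents, checking that the int64 index
# dtype is preserved in the resulting outer block.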
def test_blockize_outer_int64_shape():
@T.prim_func
def single_elementwise_int64(
A: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
) -> None:
for i0, j0, i1, j1 in T.grid(T.int64(1), T.int64(8), T.int64(16), T.int64(16)):
with T.block("B"):
vi = T.axis.S(T.int64(16), i0 * T.int64(16) + i1)
vj = T.axis.S(T.int64(128), j0 * T.int64(16) + j1)
B[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def after_single_elementwise_int64_blockize(
A: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(16), T.int64(128)), "float32"],
) -> None:
for i0, j0 in T.grid(T.int64(1), T.int64(8)):
with T.block("B_o"):
                vi_o = T.axis.spatial(T.int64(1), T.int64(0))
                vj_o = T.axis.spatial(T.int64(8), j0)
for i1, j1 in T.grid(T.int64(16), T.int64(16)):
with T.block("B"):
vi_i, vj_i = T.axis.remap("SS", [i1, j1])
B[vi_i, vj_o * T.int64(16) + vj_i] = A[
vi_i, vj_o * T.int64(16) + vj_i
] + T.float32(1)
s = tir.Schedule(single_elementwise_int64, debug_mask="all")
_, _, i1, _ = s.get_loops(s.get_block("B"))
s.blockize(i1)
tvm.ir.assert_structural_equal(s.mod["main"], after_single_elementwise_int64_blockize)
verify_trace_roundtrip(sch=s, mod=single_elementwise_int64)
if __name__ == "__main__":
    tvm.testing.main()
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def resize(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (1, 3, 40, 40))
B = T.match_buffer(b, (1, 3, 80, 80))
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
            B[n, c, vi, vj] = A[n, c, vi // 4 + vj // 4, vj // 2]
@T.prim_func
def resize_cache_index(
A: T.Buffer[(1, 3, 40, 40), "float32"], B: T.Buffer[(1, 3, 80, 80), "float32"]
) -> None:
index_var_0 = T.alloc_buffer([80, 80], dtype="int32", strides=[1])
index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
for ax0, ax1 in T.grid(80, 80):
with T.block("index_0"):
v0 = T.axis.spatial(80, ax0)
v1 = T.axis.spatial(80, ax1)
T.reads()
T.writes(index_var_0[v0, v1])
            index_var_0[v0, v1] = v0 // 4 + v1 // 4
for ax0 in T.serial(80):
with T.block("index_1"):
v0 = T.axis.spatial(80, ax0)
T.reads()
T.writes(index_var_1[v0])
            index_var_1[v0] = v0 // 2
for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
with T.block("A"):
n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[n, c, vi // 4 + vj // 4, vj // 2])
T.writes(B[n, c, vi, vj])
B[n, c, vi, vj] = A[n, c, index_var_0[vi, vj], index_var_1[vj]]
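# cache_index hoists the index arithmetic of A's read into precomputed int32
# buffers (index_var_0[vi, vj] and index_var_1[vj]) loaded by the main block.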
def test_inplace_cache_read():
sch = tvm.tir.Schedule(resize, debug_mask="all")
block = sch.get_block("A")
sch.cache_index(block, 0)
tvm.ir.assert_structural_equal(resize_cache_index, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=resize)
if __name__ == "__main__":
    tvm.testing.main()