import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.state import CachedFlags
from tvm.tir.stmt_functor import post_order_visit
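# These tests exercise ScheduleState's per-block cached flags. Roughly:
# - affine_binding: every block var is bound to a quasi-affine expression of
#   the surrounding loop vars, so schedule primitives can invert the binding;
# - region_cover: each consumer block's read regions are fully covered by its
#   producers' write regions under their lowest common ancestor;
# - stage_pipeline: the child blocks of a scope form a producer-consumer
#   pipeline with no write-after-read dependencies.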
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = 0.0
for k in range(0, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.match_buffer(b, (128, 128), "float32")
for i in range(128):
with T.block("B"):
vi = T.axis.S(128, i)
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
B[vi, 0] = A[vi, 0]
if A[vi, 0] == 0.0:
with T.block("C"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("D"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 3.0
else:
with T.block("E"):
T.reads([A[0:128, 0:128]])
T.writes([B[0:128, 0:128]])
for j in range(128):
with T.block("F"):
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def write_after_read(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def loop_carried_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
C = T.match_buffer(c, (128,))
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = T.if_then_else(vi >= 1, B[vi - 1] + 1.0, 0.0, dtype="float32")
@T.prim_func
def concatenate_multi_producer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 64):
with T.block("A_0"):
vi = T.axis.S(64, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
@T.prim_func
def concatenate_multi_producer_uncovered(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 63):
with T.block("A_0"):
vi = T.axis.S(63, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
@T.prim_func
def lca_at_loop(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
C = T.match_buffer(c, (128,))
for i in range(0, 128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi] * 2.0
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = B[vi] + 1.0
@T.prim_func
def multi_producer_consumer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128,))
B = T.match_buffer(b, (128,))
for i in range(0, 64):
with T.block("A_0"):
vi = T.axis.S(64, i)
A[vi] = vi + 1
for i in range(0, 64):
with T.block("A_1"):
vi = T.axis.S(64, i + 64)
A[vi] = vi + 2
for i in range(0, 64):
with T.block("B_0"):
vi = T.axis.S(64, i)
B[vi] = A[vi] + 2.0
for i in range(0, 64):
with T.block("B_1"):
vi = T.axis.S(64, i + 64)
B[vi] = A[vi] + 3.0
@T.prim_func
def elementwise_affine_producer(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j, k, l in T.grid(16, 2, 32, 16):
with T.block("B"):
vi = T.axis.S(128, i * 8 + j * 4 + k // 8)
vj = T.axis.S(128, k % 8 * 16 + l)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_subblock(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(32, 32):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
T.writes([B[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
for ii, jj in T.grid(4, 4):
with T.block("B_sub"):
vi_i, vj_i = T.axis.remap("SS", [ii, jj])
B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_subblock_uncovered(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(32, 32):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads([A[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
T.writes([B[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
for ii, jj in T.grid(2, 2):
with T.block("B_sub"):
vi_i, vj_i = T.axis.remap("SS", [ii, jj])
B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def bound_to_thread(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 128], scope="shared")
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vj, vi] = B[vj, vi] + 1.0
@T.prim_func
def equal_ranked_threads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 128], scope="shared")
for i_o in T.thread_binding(0, 16, thread="threadIdx.x"):
for i_i in T.thread_binding(0, 8, thread="threadIdx.y"):
for j in T.serial(0, 128):
with T.block("B"):
vi = T.axis.S(128, i_o * 8 + i_i)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi = T.axis.S(128, i_o * 8 + i_i)
vj = T.axis.S(128, j)
C[vj, vi] = B[vj, vi] + 1.0
@T.prim_func
def warp_memory(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 4, 32], scope="warp")
for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0
@T.prim_func
def warp_memory_negative(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
C = T.match_buffer(c, [128, 128])
B = T.alloc_buffer([128, 4, 32], scope="warp")
for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
for j in T.serial(0, 128):
with T.block("B"):
warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
for i_o_prime in T.thread_binding(0, 4, thread="threadIdx.y"):
for j in T.serial(0, 128):
with T.block("C"):
_warp_id, warp_id, lane_id, vj = T.axis.remap(
"SSSS", [i_o, i_i, i_o_prime, j]
)
C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0
@T.prim_func
def non_perfect_tiling_cache(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh_0, ww_0 in T.grid(28, 28):
for ax0 in T.serial(0, 10):
for ax1 in T.serial(0, 10):
with T.block("cache"):
h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
T.where(
1 <= hh_0 * 8 + ax0
and hh_0 * 8 + ax0 < 225
and 1 <= ww_0 * 8 + ax1
and ww_0 * 8 + ax1 < 225
)
cache[h, w] = X[h, w]
for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(
Y[h, w],
T.if_then_else(
T.likely(1 <= h + kh, dtype="bool")
and T.likely(h + kh < 225, dtype="bool")
and T.likely(1 <= w + kw, dtype="bool")
and T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1],
0.0,
dtype="float32",
),
)
@T.prim_func
def uncovered_producer_region(A: T.Buffer[(128,), "float32"], B: T.Buffer[(128,), "float32"]):
for i in range(120):
with T.block("producer"):
vi = T.axis.S((0, 120), i)
A[vi] = 1.0
for i in range(120):
with T.block("consumer"):
vi = T.axis.S((8, 128), i + 8)
B[vi] = A[vi]
@T.prim_func
def matmul_relu_padding(A: T.Buffer[(127, 127), "float16"], B: T.Buffer[(127, 127), "float16"], compute: T.Buffer[(127, 127), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C = T.alloc_buffer([127, 127], dtype="float32")
A_reindex = T.alloc_buffer([128, 128], dtype="float16")
B_reindex = T.alloc_buffer([128, 128], dtype="float16")
C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
for ax0, ax1, ax2 in T.grid(128, 1, 128):
with T.block("A_reindex"):
v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
T.reads(A[v0, v2])
T.writes(A_reindex[v0, v2])
A_reindex[v0, v2] = T.if_then_else(v0 < 127 and v2 < 127, A[v0, v2], T.float16(0), dtype="float16")
for ax0, ax1, ax2 in T.grid(1, 128, 128):
with T.block("B_reindex"):
v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
T.reads(B[v2, v1])
T.writes(B_reindex[v2, v1])
B_reindex[v2, v1] = T.if_then_else(v2 < 127 and v1 < 127, B[v2, v1], T.float16(0), dtype="float16")
for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_0_1_ax1_0_1_fused in T.thread_binding(1, thread="blockIdx.x"):
for ax0_0_2_ax1_0_2_fused in T.thread_binding(16, thread="threadIdx.y"):
for ax2_0_0, ax2_0_1, ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(2, 2, 1, 2, 2, 1, 1):
with T.block("C_o"):
v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused // 2 + ax0_0_3 + ax0_0_4)
v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_0_ax1_0_0_fused * 4 + ax0_0_2_ax1_0_2_fused % 2 * 2 + ax1_0_3)
v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax2_0_2)
T.reads(A_reindex[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
with T.init():
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("C_init"):
v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads()
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
with T.block("C"):
v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
for ax0, ax1 in T.grid(16, 32):
with T.block("C_reindex_shared_wmma.accumulator"):
v0 = T.axis.spatial(128, ax0_0_2_ax1_0_2_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_2_ax1_0_2_fused % 2 * 32 + ax1)
T.reads(C_reindex_shared_wmma_accumulator[v0, v1])
T.writes(C_reindex_shared[v0, v1])
C_reindex_shared[v0, v1] = C_reindex_shared_wmma_accumulator[v0, v1]
for ax0, ax1 in T.grid(128, 64):
with T.block("C_reindex_shared"):
v0 = T.axis.spatial(128, ax0)
v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax1)
T.where(ax0 < 127 and ax0_0_0_ax1_0_0_fused * 64 + ax1 < 127)
T.reads(C_reindex_shared[v0, v1])
T.writes(C[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch":3})
C[v0, v1] = C_reindex_shared[v0, v1]
for i0, i1 in T.grid(127, 127):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads(C[i0_1, i1_1])
T.writes(compute[i0_1, i1_1])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
@T.prim_func
def splitted_square_sum_with_predicate(
A: T.Buffer[(1, 7, 7, 512), "float32"], B: T.Buffer[(1, 1, 1, 512), "float32"]
) -> None:
for i0_i1_i2_i3_0_fused, ax0, ax1, ax2, ax3 in T.grid(2, 1, 1, 1, 256):
for ax4_ax5_fused_0, ax4_ax5_fused_1 in T.grid(1, 256):
with T.block("B"):
T.where(ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1 < 49)
ax0_1, ax1_1, ax2_1 = T.axis.remap("SSS", [ax0, ax1, ax2])
ax3_1 = T.axis.spatial(512, i0_i1_i2_i3_0_fused * 256 + ax3)
rv0 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) // 7)
rv1 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) % 7)
T.reads(A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1])
T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
B[ax0_1, ax1_1, ax2_1, ax3_1] += A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1]
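# Helper: walk the PrimFunc body post-order, find the tir.Block whose
# name_hint matches, and return the StmtSRef tracked by the ScheduleState.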
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
result = None
def f_visit(node):
nonlocal result
if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
result = node
func = s.mod["main"]
post_order_visit(func.body, f_visit)
assert result is not None and isinstance(result, tvm.tir.Block)
return s.get_sref(result)
def test_elementwise():
s = tir.ScheduleState(elementwise, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_matmul():
s = tir.ScheduleState(matmul, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "init")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "update")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_block_in_opaque_block():
s = tir.ScheduleState(block_in_opaque_block, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "B")) |
== CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "E")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "F")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_write_after_read():
s = tir.ScheduleState(write_after_read, debug_mask="all")
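# Block C reads B before block B writes it, so the root scope carries a
# write-after-read dependency and is not a stage pipeline.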
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
def test_loop_carried_dependency():
s = tir.ScheduleState(loop_carried_dependency, debug_mask="all")
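# At iteration i, block C reads B[i - 1], written by block B in an earlier
# iteration of the shared loop, so C's reads are not covered within the loop
# body (region_cover=False) and the root is not a stage pipeline.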
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
def test_concatenate_multi_producer_covered():
s = tir.ScheduleState(concatenate_multi_producer, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_concatenate_multi_producer_uncovered():
s = tir.ScheduleState(concatenate_multi_producer_uncovered, debug_mask="all")
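# A_0 writes only A[0:63], so the consumer B's read of A[0:128] is left
# uncovered at index 63; hence region_cover=False on block B.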
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
def test_lca_at_loop():
s = tir.ScheduleState(lca_at_loop, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_multi_producer_consumer():
s = tir.ScheduleState(multi_producer_consumer, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "A_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "A_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_0")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_1")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_elementwise_affine_producer():
s = tir.ScheduleState(elementwise_affine_producer, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_subblock():
s = tir.ScheduleState(elementwise_subblock, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_sub")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_subblock_uncovered():
s = tir.ScheduleState(elementwise_subblock_uncovered, debug_mask="all")
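# Block B writes only a 2x2 corner of each 4x4 tile (both in its declared
# regions and its inner loops), so consumer C's reads of B are uncovered.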
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B_sub")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
def test_thread_binding():
s = tir.ScheduleState(bound_to_thread, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_equal_ranked_threads():
s = tir.ScheduleState(equal_ranked_threads, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_warp_memory():
s = tir.ScheduleState(warp_memory, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_warp_memory_negative():
s = tir.ScheduleState(warp_memory_negative, debug_mask="all")
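# B lives in warp scope but is consumed under a different threadIdx.y binding
# (i_o_prime), so the producer and consumer thread bindings do not match and
# C's reads of B are not covered.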
assert s._get_cached_flags(_get_block(s, "root")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=False,
)
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "C")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
def test_non_perfect_tiling_cache():
s = tir.ScheduleState(non_perfect_tiling_cache, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "cache")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
assert s._get_cached_flags(_get_block(s, "compute")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_uncovered_producer_region():
s = tir.ScheduleState(uncovered_producer_region, debug_mask="all")
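# The producer writes A[0:120] while the consumer reads A[8:128]; the tail
# A[120:128] is uncovered, so region_cover is False on the consumer.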
assert s._get_cached_flags(_get_block(s, "consumer")) == CachedFlags(
affine_binding=True,
region_cover=False,
stage_pipeline=True,
)
def test_matmul_relu_padding():
s = tir.ScheduleState(matmul_relu_padding, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "C_reindex_shared")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
def test_splitted_square_sum_with_predicate():
s = tir.ScheduleState(splitted_square_sum_with_predicate, debug_mask="all")
assert s._get_cached_flags(_get_block(s, "B")) == CachedFlags(
affine_binding=True,
region_cover=True,
stage_pipeline=True,
)
if __name__ == "__main__":
tvm.testing.main()
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
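# storage_align(block, buffer_index, axis, factor, offset) requests that the
# buffer's stride along `axis` satisfy stride == k * factor + offset for some
# integer k; it is recorded as a "buffer_dim_align" block annotation, e.g.
#     s.storage_align(B, 0, axis=0, factor=128, offset=127)
# yields the [[0, 0, 128, 127]] annotation expected below.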
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
T.block_attr({"buffer_dim_align":[[0, 0, 128, 127]]})
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
@T.prim_func
def element_wise_invalid_annotation(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
T.block_attr({"buffer_dim_align": [0]})
vi, vj = T.axis.remap("SS", [i0, ax1])
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
B[vi, vj] = (A[vi, vj]*T.float32(2))
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_storage_align(use_block_name):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = 'B' if use_block_name else s.get_block("B")
s.storage_align(B, 0, axis=0, factor=128, offset=127)
tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_storage_align_update():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
s.storage_align(B, 0, axis=0, factor=128, offset=0)
s.storage_align(B, 0, axis=0, factor=128, offset=127)
tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_storage_align_invalid_factor1():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=0, factor=0, offset=127)
def test_storage_align_invalid_factor2():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=0, factor=-1, offset=127)
def test_storage_align_invalid_buffer():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
C = s.get_block("C")
with pytest.raises(tir.ScheduleError):
s.storage_align(C, 0, axis=0, factor=128, offset=127)
def test_storage_align_invalid_buffer_index():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 2, axis=0, factor=128, offset=127)
def test_storage_align_invalid_axis():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=2, factor=128, offset=127)
def test_storage_align_invalid_annotation():
func = element_wise_invalid_annotation
s = tir.Schedule(func, debug_mask='all')
B = s.get_block("B")
with pytest.raises(tir.ScheduleError):
s.storage_align(B, 0, axis=2, factor=128, offset=127)
if __name__ == "__main__":
tvm.testing.main()
import sys
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
from tvm.tir.tensor_intrin.arm_cpu import (
DP4A_INTRIN,
ARM_DOT_4x4_i8_NEON_INTRIN,
ARM_DOT_4x4_i8_SDOT_INTRIN,
)
from tvm.tir.tensor_intrin.rocm import AMDGPU_SDOT4_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN
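# Each tensorize test pairs a "desc" PrimFunc (the computation pattern to be
# matched) with an "intrin" PrimFunc (the opaque hardware implementation);
# the pair is registered under a string name, and sch.tensorize replaces a
# matched loop nest with the intrinsic.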
@T.prim_func
def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
@T.prim_func
def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.tvm_mma_sync(
C.data,
C.elem_offset // 256,
A.data,
A.elem_offset // 256,
B.data,
B.elem_offset // 256,
C.data,
C.elem_offset // 256,
dtype="handle",
)
)
@T.prim_func
def dot_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,))
B = T.match_buffer(b, (4,))
C = T.match_buffer(c, ())
with T.block("root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
for i in range(0, 4):
with T.block("update"):
vi = T.axis.remap("R", [i])
C[()] = C[()] + A[vi] * B[vi]
@T.prim_func
def dot_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (4,), offset_factor=1)
B = T.match_buffer(b, (4,), offset_factor=1)
C = T.match_buffer(c, (), offset_factor=1)
with T.block(" |
root"):
T.reads(C[()], A[0 : 4], B[0 : 4])
T.writes(C[()])
T.evaluate(
T.call_extern(
"vec4add",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def outer_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
for i, j in T.grid(16, 16):
with T.block("update"):
vii, vjj = T.axis.remap("SS", [i, j])
C[vii, vjj] = C[vii, vjj] + A[vii, 0] * B[vjj, 0]
@T.prim_func
def outer_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 1), offset_factor=1)
B = T.match_buffer(b, (16, 1), offset_factor=1)
C = T.match_buffer(c, (16, 16), offset_factor=1)
with T.block("root"):
T.reads(
C[0 : 16, 0 : 16],
A[0 : 16, 0 : 1],
B[0 : 16, 0 : 1],
)
T.writes(C[0 : 16, 0 : 16])
T.evaluate(
T.call_extern(
"outer_product",
C.data,
C.elem_offset,
A.data,
A.elem_offset,
B.data,
B.elem_offset,
dtype="int32",
)
)
@T.prim_func
def matmul(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
for i_outer, j_outer in T.grid(8, 8):
for i_inner_init, j_inner_init in T.grid(16, 16):
with T.block("init"):
vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
C[vi_init, vj_init] = T.float32(0)
for k_outer in T.grid(8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
]
)
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def batch_matmul(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k in T.grid(16, 128, 128, 128):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
@T.prim_func
def tensorized_batch_matmul_mma(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n in range(0, 16):
for i, j, k in T.grid(8, 8, 8):
with T.block("update"):
vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
T.reads(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
)
T.writes(C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
(16, 16),
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
(16, 16),
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
@T.prim_func
def tensorized_batch_matmul_dot_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i, j, k_0 in T.grid(16, 128, 128, 32):
with T.block("blockized_update"):
vn, vi, vj, vko = T.axis.remap("SSSR", [n, i, j, k_0])
T.reads(
C[vn, vi, vj], A[vn, vi, vko * 4 : vko * 4 + 4], B[vn, vj, vko * 4 : vko * 4 + 4]
)
T.writes(C[vn, vi, vj])
A_1 = T.match_buffer(
A[vn, vi, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
B_1 = T.match_buffer(
B[vn, vj, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(C[vn, vi, vj], [], dtype="float32", offset_factor=1)
T.evaluate(
T.call_extern(
"vec4add",
C_1.data,
C_1.elem_offset,
A_1.data,
A_1.elem_offset,
B_1.data,
B_1.elem_offset,
dtype="int32",
)
)
@T.prim_func
def tensorized_batch_matmul_outer_product(
A: T.Buffer[(16, 128, 128), "float32"],
B: T.Buffer[(16, 128, 128), "float32"],
C: T.Buffer[(16, 128, 128), "float32"],
) -> None:
for n, i, j in T.grid(16, 128, 128):
with T.block("init"):
vn, vi, vj = T.axis.remap("SSS", [n, i, j])
T.reads()
T.writes(C[vn, vi, vj])
C[vn, vi, vj] = T.float32(0)
for n, i_0, j_0, k in T.grid(16, 8, 8, 128):
with T.block("blockized_update"):
vn, vio, vjo, vk = T.axis.remap("SSSR", [n, i_0, j_0, k])
T.reads(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
A[vn, vio * 16 : vio * 16 + 16, vk],
B[vn, vjo * 16 : vjo * 16 + 16, vk],
)
T.writes(C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
A_1 = T.match_buffer(A[vn, vio * 16 : vio * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1)
B_1 = T.match_buffer(B[vn, vjo * 16 : vjo * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1
)
C_1 = T.match_buffer(
C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16], [16, 16], dtype="float32", offset_factor=1
)
T.evaluate(
T.call_extern("outer_product", C_1.data, C_1.elem_offset, A_1.data, A_1.elem_offset,
B_1.data, B_1.elem_offset, dtype="int32"
)
)
@T.prim_func
def annotated_mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
with T.block("root"):
T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
T.writes(C[0 : 16, 0 : 16])
for i, j, k in T.grid(16, 16, 16):
with T.block("update"):
T.block_attr({"test_annotation": True})
vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
@T.prim_func
def annotated_matmul(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(128, 128), "float32"],
) -> None:
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
T.block_attr({"test_annotation": True})
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def annotated_tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
for i_outer, j_outer in T.grid(8, 8):
for i_inner_init, j_inner_init in T.grid(16, 16):
with T.block("init"):
vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
T.block_attr({"test_annotation": True})
C[vi_init, vj_init] = T.float32(0)
for k_outer in T.grid(8):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
]
)
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
tir.TensorIntrin.register("test_annotated_mma_intrin", annotated_mma_desc, mma_intrin)
tir.TensorIntrin.register(" |
test_dot_product_intrin", dot_product_desc, dot_product_intrin)
tir.TensorIntrin.register("test_outer_product_intrin", outer_product_desc, outer_product_intrin)
def test_tensorize_matmul():
func = matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.decompose_reduction(update, ko)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_matmul, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_batch_matmul():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
_, i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_mma, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=batch_matmul)
def test_tensorize_dot_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, _, _, k = s.get_loops(C)
_, ki = s.split(k, factors=[None, 4])
s.tensorize(ki, "test_dot_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_dot_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_outer_product():
func = batch_matmul
s = tir.Schedule(func, debug_mask="all")
C = s.get_block("update")
_, i, j, k = s.get_loops(C)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
s.reorder(io, jo, k, ii, ji)
s.tensorize(ii, "test_outer_product_intrin")
tvm.ir.assert_structural_equal(tensorized_batch_matmul_outer_product, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_tensorize_with_annotation():
func = annotated_matmul
s = tir.Schedule(func, debug_mask="all")
update = s.get_block("update")
i, j, k = s.get_loops(update)
io, ii = s.split(i, factors=[None, 16])
jo, ji = s.split(j, factors=[None, 16])
ko, ki = s.split(k, factors=[None, 16])
s.reorder(io, jo, ko, ii, ji, ki)
s.decompose_reduction(update, ko)
s.tensorize(ii, "test_annotated_mma_intrin")
tvm.ir.assert_structural_equal(annotated_tensorized_matmul, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def get_matmul_packed(m, n, k, lhs_type, int32_lanes):
X = te.placeholder((m, k), name="X", dtype=lhs_type)
packed_W = te.placeholder(
(n // int32_lanes, k // 4, int32_lanes, 4), name="packedW", dtype="int8"
)
ak = te.reduce_axis((0, k), name="k")
matmul = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* packed_W[
tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4
].astype("int32"),
axis=ak,
),
name="compute",
)
return te.create_prim_func([X, packed_W, matmul])
def test_tensorize_vnni():
m, n, k = 128, 128, 128
func = get_matmul_packed(m, n, k, "uint8", 16)
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
_, j, k = sch.get_loops(block)
_, ji = sch.split(j, factors=[None, 16])
ko, ki = sch.split(k, factors=[None, 4])
sch.reorder(ko, ji, ki)
sch.decompose_reduction(block, ko)
sch.tensorize(ji, VNNI_DOT_16x4_INTRIN)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_arm_dot():
m, n, k = 128, 128, 128
func = get_matmul_packed(m, n, k, "int8", 4)
for intrin in [ARM_DOT_4x4_i8_SDOT_INTRIN, ARM_DOT_4x4_i8_NEON_INTRIN]:
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
_, j, k = sch.get_loops(block)
_, ji = sch.split(j, factors=[None, 4])
ko, ki = sch.split(k, factors=[None, 4])
sch.reorder(ko, ji, ki)
sch.decompose_reduction(block, ko)
sch.tensorize(ji, intrin)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_dpa4():
m, n, k = 128, 128, 128
X = te.placeholder((m, k), name="X", dtype="int8")
W = te.placeholder((n, k), name="W", dtype="int8")
ak = te.reduce_axis((0, k), name="k")
matmul = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* W[j, ak].astype("int32"),
axis=ak,
),
name="compute",
)
func = te.create_prim_func([X, W, matmul])
for intrin in [AMDGPU_SDOT4_INTRIN, DP4A_INTRIN]:
sch = tir.Schedule(func, debug_mask="all")
block = sch.get_block("compute")
i, j, k = sch.get_loops(block)
by, ty, yi = sch.split(i, factors=sch.sample_perfect_tile(i, n=3))
bx, tx, xi = sch.split(j, factors=sch.sample_perfect_tile(j, n=3))
ko, ki = sch.split(k, [None, 4])
ko, kt = sch.split(ko, factors=sch.sample_perfect_tile(ko, n=2))
sch.reorder(by, bx, ty, tx, yi, xi)
CC = sch.cache_write(block, 0, "local")
sch.reverse_compute_at(CC, tx)
def fetch_to_shared(block, idx):
block_read = sch.cache_read(block, idx, "shared")
sch.compute_at(block_read, ko, True)
return block_read
fetch_to_shared(block, 0)
fetch_to_shared(block, 1)
sch.decompose_reduction(block, ko)
sch.tensorize(ki, intrin)
verify_trace_roundtrip(sch=sch, mod=func)
def test_tensor_intrin_look_up():
intrin_name = 'non_existent_intrin'
assert tir.TensorIntrin.get(intrin_name, allow_missing=True) is None
with pytest.raises(ValueError):
tir.TensorIntrin.get(intrin_name)
def test_tensorize_matmul_mixed_dtype():
@T.prim_func
def matmul_int64_shape(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
C: T.Buffer[(T.int64(128), T.int64(128)), "float32"]
) -> None:
for i_0, j_0 in T.grid(T.int64(8), T.int64(8)):
for i_1_init, j_1_init in T.grid(T.int64(16), T.int64(16)):
with T.block("init"):
vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1_init)
vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1_init)
C[vi, vj] = T.float32(0)
for k_0, i_1, j_1, k_1 in T.grid(T.int64(8), T.int64(16), T.int64(16), T.int64(16)):
with T.block("update"):
vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1)
vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1)
vk = T.axis.reduce(T.int64(128), k_0 * T.int64(16) + k_1)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def tensorized_matmul_int64_shape(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
C: T.Buffer[(T.int64(128), T.int64(128)), "float32"]
) -> None:
for i_outer, j_outer in T.grid(T.int64(8), T.int64(8)):
for i_inner_init, j_inner_init in T.grid(T.int64(16), T.int64(16)):
with T.block("init"):
vi = T.axis.spatial(T.int64(128), i_outer * T.int64(16) + i_inner_init)
vj = T.axis.spatial(T.int64(128), j_outer * T.int64(16) + j_inner_init)
C[vi, vj] = T.float32(0)
for k_outer in T.grid(T.int64(8)):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
T.reads(
[
C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
]
)
T.writes(C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)])
A_elem_offset = T.var("int32")
B_elem_offset = T.var("int32")
C_elem_offset = T.var("int32")
A_sub = T.match_buffer(
A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=A_elem_offset,
)
B_sub = T.match_buffer(
B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=B_elem_offset,
)
C_sub = T.match_buffer(
C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
[16, 16],
elem_offset=C_elem_offset,
)
T.evaluate(
T.tvm_mma_sync(
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
A_sub.data,
T.floordiv(A_sub.elem_offset, 256),
B_sub.data,
T.floordiv(B_sub.elem_offset, 256),
C_sub.data,
T.floordiv(C_sub.elem_offset, 256),
dtype="handle",
)
)
s = tir.Schedule(matmul_int64_shape, debug_mask="all")
update = s.get_block("update")
ii = s.get_loops(update)[-3]
s.tensorize(ii, "test_mma_intrin")
tvm.ir.assert_structural_equal(s.mod["main"], tensorized_matmul_int64_shape)
verify_trace_roundtrip(sch=s, mod=matmul_int64_shape)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm.tir.tensor_intrin.cuda import (
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_32x16_B_INTRIN,
LDMATRIX_16x32_B_TRANS_INTRIN,
MMA_f16f16f32_INTRIN,
MMA_f16f16f32_TRANS_INTRIN,
MMA_f16f16f16_INTRIN,
MMA_f16f16f16_TRANS_INTRIN,
MMA_i8i8i32_INTRIN,
MMA_i8i8i32_TRANS_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
shared_16x16_to_ldmatrix_32x8_layout,
shared_32x16_to_ldmatrix_32x16_layout,
shared_16x32_to_ldmatrix_32x16_layout,
)
import tvm.testing
import numpy as np
from tvm.testing.tir import mma_schedule
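# End-to-end tensor-core tests: mma_schedule tiles a GEMM onto ldmatrix load,
# mma.sync compute, and global-store intrinsics; the result is built for CUDA
# and checked numerically against numpy.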
M = 4096
N = 4096
K = 4096
measure_perf = False
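# 2 * M * N * K flops per GEMM: one multiply and one add per MAC.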
gflops = (N * M * K) * 2 / 1e9
def matmul(m, n, k, in_dtype, out_dtype, b_transposed):
b_shape = (n, k) if b_transposed else (k, n)
a = te.placeholder((m, k), name="A", dtype=in_dtype)
b = te.placeholder(b_shape, name="B", dtype=in_dtype)
k = te.reduce_axis((0, k), name="k")
def maybe_cast(v):
if in_dtype != out_dtype:
return tvm.tir.Cast(out_dtype, v)
return v
def maybe_swap(i, j):
if b_transposed:
return j, i
return i, j
c = te.compute(
(m, n),
lambda i, j: te.sum(maybe_cast(a[i, k]) * maybe_cast(b[maybe_swap(k, j)]), axis=[k]),
name="C",
)
return (a, b, c)
def run_test(
k_inner,
in_dtype,
out_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
):
sch = mma_schedule(
te.create_prim_func(matmul(M, N, K, in_dtype, out_dtype, b_transposed)),
k_inner,
in_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
)
f = tvm.build(sch.mod["main"], target="cuda", name="dense")
dev = tvm.device("cuda", 0)
if in_dtype == "float16":
a_np = np.random.uniform(size=(M, K)).astype("float16")
if b_transposed:
b_np = np.random.uniform(size=(N, K)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype(
out_dtype
)
else:
b_np = np.random.uniform(size=(K, N)).astype("float16")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype(out_dtype)
else:
a_np = np.random.randint(-128, 128, (M, K)).astype("int8")
if b_transposed:
b_np = np.random.randint(-128, 128, (N, K)).astype("int8")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype(
"int32"
)
else:
b_np = np.random.randint(-128, 128, (K, N)).astype("int8")
c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype("int32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros((M, N), dtype=out_dtype), dev)
f(a, b, c)
if out_dtype != "float16":
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
return lambda: f.time_evaluator(f.entry_name, dev, number=500)(a, b, c)
@tvm.testing.requires_cuda_compute_version(8)
def test_f16f16f32_m16n16k16():
def index_map(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
k_inner = 16
in_dtype = "float16"
out_dtype = "float32"
i_factors, j_factors, k_factors = [4, 8, 2, 4, 1], [1, 64, 2, 1, 2], [128, 2, 1]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False,
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
MMA_f16f16f32_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f32_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True,
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
MMA_f16f16f32_TRANS_INTRIN,
MMA_fill_16x16_f32_INTRIN,
MMA_store_16x16_f32_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f32_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_cuda_compute_version(8)
def test_f16f16f16_m16n16k16():
def index_map(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
k_inner = 16
in_dtype = "float16"
out_dtype = "float16"
i_factors, j_factors, k_factors = [16, 2, 1, 4, 2], [16, 2, 2, 1, 4], [128, 2, 1]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False,
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_INTRIN,
MMA_f16f16f16_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f16_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True,
i_factors,
j_factors,
k_factors,
index_map,
index_map,
index_map,
LDMATRIX_16x16_A_INTRIN,
LDMATRIX_16x16_B_TRANS_INTRIN,
MMA_f16f16f16_TRANS_INTRIN,
MMA_fill_16x16_f16_INTRIN,
MMA_store_16x16_f16_global_INTRIN,
)
if measure_perf and timer:
print("f16f16f16_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_cuda_compute_version(8)
def test_i8i8i32_m16n16k32():
def index_map_A(i, j):
return (
i // 16,
j // 32,
*shared_16x32_to_ldmatrix_32x16_layout(i % 16, j % 32),
)
def index_map_B(i, j):
return (
i // 32,
j // 16,
*shared_32x16_to_ldmatrix_32x16_layout(i % 32, j % 16),
)
def index_map_C(i, j):
return (
i // 16,
j // 16,
*shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16),
)
k_inner = 32
in_dtype = "int8"
out_dtype = "int32"
i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [8, 4, 4, 2, 1], [32, 2, 2]
timer = run_test(
k_inner,
in_dtype,
out_dtype,
False,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_32x16_B_INTRIN,
MMA_i8i8i32_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
)
if measure_perf and timer:
print("i8i8i32_m16n16k32: %f GOPS" % (gflops / (timer().mean)))
timer = run_test(
k_inner,
in_dtype,
out_dtype,
True,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_A,
index_map_C,
LDMATRIX_16x32_A_INTRIN,
LDMATRIX_16x32_B_TRANS_INTRIN,
MMA_i8i8i32_TRANS_INTRIN,
MMA_fill_16x16_i32_INTRIN,
MMA_store_16x16_i32_global_INTRIN,
)
if measure_perf and timer:
print("i8i8i32_m16n16k32_trans: %f GOPS" % (gflops / (timer().mean)))
if __name__ == "__main__":
tvm.testing.main()
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule import BlockRV, Instruction, InstructionKind, LoopRV, Trace
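# A Trace is a replayable list of Instructions plus the random decisions made
# while sampling; str(trace) renders it as a python `apply_trace` function.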
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
def _make_get_block(name, output):
return Instruction(
kind=InstructionKind.get("GetBlock"),
inputs=[],
attrs=[name, "main"],
outputs=[output],
)
def _make_get_loops(input, outputs):
return Instruction(
kind=InstructionKind.get("GetLoops"),
inputs=[input],
attrs=[],
outputs=outputs,
)
def _make_compute_inline(input):
return Instruction(
kind=InstructionKind.get("ComputeInline"),
inputs=[input],
attrs=[],
outputs=[],
)
def _make_split(inputs, outputs):
return Instruction(
kind=InstructionKind.get("Split"),
inputs=inputs,
attrs=[True],
outputs=outputs,
)
def _make_enter_postproc():
return Instruction(
kind=InstructionKind.get("EnterPostproc"),
inputs=[],
attrs=[],
outputs=[],
)
def _make_annotate(block: BlockRV, annotation: str):
return Instruction(
kind=InstructionKind.get("Annotate"),
inputs=[block, annotation],
attrs=["meta_schedule.auto_tensorize"],
outputs=[],
)
def _make_trace_1(b0, l1, l2):
return Trace(
        insts=[
_make_get_block(name="block", output=b0),
_make_get_loops(input=b0, outputs=[l1, l2]),
],
decisions={},
)
def _make_trace_2(b0):
return Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
],
decisions={},
)
def _make_trace_3(b0, b1, add_postproc):
if add_postproc:
insts = [
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
_make_get_block(name="C", output=b1),
_make_enter_postproc(),
_make_compute_inline(input=b1),
]
else:
insts = [
_make_get_block(name="B", output=b0),
_make_compute_inline(input=b0),
_make_get_block(name="C", output=b1),
]
return Trace(insts=insts, decisions={})
def _make_trace_4(b0, l1, l2, l3):
return Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_get_loops(input=b0, outputs=[l1]),
_make_split([l1, None, 32], [l2, l3]),
],
decisions={},
)
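# For orientation (a sketch, not executed here): replaying the trace built by
# _make_trace_2 is equivalent to the direct schedule calls
#
#   sch = tir.Schedule(elementwise)
#   b = sch.get_block("B", func_name="main")
#   sch.compute_inline(b)
#
# which is what test_trace_apply_to_schedule below checks against
# elementwise_inlined.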
def test_trace_construct_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="block", func_name="main")',
" l1, l2 = sch.get_loops(block=b0)",
)
)
assert len(trace.insts) == 2
assert len(trace.decisions) == 0
def test_trace_construct_get_decision_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
assert trace.get_decision(trace.insts[0]) is None
assert trace.get_decision(trace.insts[1]) is None
def test_trace_construct_append_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
trace.append(inst=_make_get_block("block2", BlockRV()))
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> Non |
e:",
' b0 = sch.get_block(name="block", func_name="main")',
" l1, l2 = sch.get_loops(block=b0)",
' b3 = sch.get_block(name="block2", func_name="main")',
)
)
def test_trace_construct_pop_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
last_inst = trace.insts[-1]
assert trace.pop().same_as(last_inst)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="block", func_name="main")',
)
)
def test_trace_construct_pop_2():
trace = Trace([], {})
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
" pass",
)
)
assert trace.pop() is None
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
" pass",
)
)
def test_trace_apply_to_schedule():
trace = _make_trace_2(BlockRV())
sch = tir.Schedule(elementwise, debug_mask="all")
trace.apply_to_schedule(sch, remove_postproc=False, decision_provider=None)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
def test_trace_as_json_1():
trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV())
obj = trace.as_json()
assert obj == [
[
["GetBlock", [], ["block", "main"], ["b0"]],
["GetLoops", ["b0"], [], ["l1", "l2"]],
],
[],
]
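# Serialization note: the JSON object is a pair [insts, decisions]. Each
# instruction is encoded as [kind_name, inputs, attrs, outputs], with random
# variables referred to by their printed names ("b0", "l1", ...);
# Trace.apply_json_to_schedule (exercised below) replays such an object onto a
# fresh schedule.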
def test_trace_simplified_1():
trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
    trace = trace.simplified(remove_postproc=True)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
)
)
def test_trace_simplified_2():
trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
trace = trace.simplified(remove_postproc=False)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" sch.compute_inline(block=b0)",
' b1 = sch.get_block(name="C", func_name="main")',
" sch.enter_postproc()",
" sch.compute_inline(block=b1)",
)
)
def test_trace_simplified_3():
trace = _make_trace_4(BlockRV(), LoopRV(), LoopRV(), LoopRV()).simplified(remove_postproc=False)
assert str(trace) == "\n".join(
(
"
"def apply_trace(sch: tir.Schedule) -> None:",
' b0 = sch.get_block(name="B", func_name="main")',
" l1, = sch.get_loops(block=b0)",
" l2, l3 = sch.split(loop=l1, factors=[None, 32], preserve_unit_iters=True)",
)
)
def test_apply_json_to_schedule_1():
trace = _make_trace_2(BlockRV())
json_obj = trace.as_json()
sch = tir.Schedule(elementwise, debug_mask="all")
Trace.apply_json_to_schedule(json_obj, sch)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
def _test_apply_annotation_trace_from_json(annotation: str):
" |
""Test applying an annotation works without crashing.
Designed to handle some previously failing edge cases like the
empty string.
"""
b0 = BlockRV()
trace = Trace(
insts=[
_make_get_block(name="B", output=b0),
_make_annotate(block=b0, annotation=annotation),
],
decisions={},
)
json_obj = trace.as_json()
sch = tir.Schedule(elementwise, debug_mask="all")
Trace.apply_json_to_schedule(json_obj, sch)
@T.prim_func
def elementwise_expected(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.block_attr({"meta_schedule.auto_tensorize": annotation})
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
tvm.ir.assert_structural_equal(elementwise_expected, sch.mod["main"])
def test_apply_annotation_from_json():
_test_apply_annotation_trace_from_json("SSRSSR")
_test_apply_annotation_trace_from_json("")
_test_apply_annotation_trace_from_json('""')
_test_apply_annotation_trace_from_json('"')
if __name__ == "__main__":
    test_trace_simplified_2()
import tvm
from tvm.script import tir as T
from tvm.tir import Schedule
from tvm.tir.schedule.transform import tile_with_tensor_intrin
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN
@tvm.script.ir_module
class DenseVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
for i0, i1, i2 in T.grid(1024, 1024, 1024):
with T.block("compute"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                    T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                        placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class DenseVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer[(1024, 1024), "uint8"],
placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
compute: T.Buffer[(1024, 1024), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1_0, i2_0, i1_1, i2_1 in T.grid(1024, 64, 256, 16, 4):
with T.block("compute"):
i = T.axis.spatial(1024, i0)
j = T.axis.spatial(1024, i1_0 * 16 + i1_1)
k = T.axis.reduce(1024, i2_0 * 4 + i2_1)
                T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
T.writes(compute[i, j])
with T.init():
compute[i, j] = 0
compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                    placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModule:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
        placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
with T.block("conv2d_NCHWc_int8"):
(
n,
oc_chunk,
oh,
ow,
oc_block,
kh,
kw,
ic_outer,
ic_f_inner,
ic_s_inner,
) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2, i3, i4_0, i5, i6, i7, i8, i9_0, i4_1, i9_1 in T.grid(
1, 16, |
56, 56, 1, 1, 1, 4, 4, 1, 16, 4
):
with T.block("conv2d_NCHWc_int8"):
n, oc_chunk, oh, ow = T.axis.remap("SSSS", [i0, i1, i2, i3])
oc_block = T.axis.spatial(16, i4_0 * 16 + i4_1)
kh, kw, ic_outer, ic_f_inner = T.axis.remap("RRRR", [i5, i6, i7, i8])
ic_s_inner = T.axis.reduce(4, i9_0 * 4 + i9_1)
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
) * T.cast(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
"int32",
)
def test_tile_with_tensor_intrin_dense_vnni():
s = Schedule(DenseVNNIModule)
block = s.get_block("compute")
tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)
_, _, _, i1_1, _ = s.get_loops(block)
assert s.get(tiled_loop) == s.get(i1_1)
tvm.ir.assert_structural_equal(s.mod, DenseVNNIModuleTiled)
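# Loop-extent sanity check (illustrative): VNNI_DOT_16x4_INTRIN produces 16
# int32 outputs from a 4-wide int8 reduction, so tiling splits j as 64 * 16 and
# k as 256 * 4, which is exactly the T.grid(1024, 64, 256, 16, 4) nest in
# DenseVNNIModuleTiled.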
def test_tile_with_tensor_intrin_conv2d_nchwc_vnni():
s = Schedule(Conv2dNCHWcVNNIModule)
block = s.get_block("conv2d_NCHWc_int8")
tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)
tiled_loops = s.get_loops(block)
assert len(tiled_loops) == 12
assert s.get(tiled_loop) == s.get(tiled_loops[-2])
tvm.ir.assert_structural_equal(s.mod, Conv2dNCHWcVNNIModuleTiled)
if __name__ == "__main__":
    test_tile_with_tensor_intrin_dense_vnni()
    test_tile_with_tensor_intrin_conv2d_nchwc_vnni()
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
def packed_index_map_func(m, n):
    return m // 16, n // 16, m % 16, n % 16
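# packed_index_map_func packs a 128x128 buffer into 16x16 tiles, giving the
# transformed shape (8, 8, 16, 16). A quick sanity check (illustrative only):
#
#   assert packed_index_map_func(17, 33) == (1, 2, 1, 1)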
@T.prim_func
def two_elementwise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def two_elementwise_transformed_intermediate_buffer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((8, 8, 16, 16), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            B[vi // 16, vj // 16, vi % 16, vj % 16] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi // 16, vj // 16, vi % 16, vj % 16] + 1.0
@T.prim_func
def two_elementwise_transformed_input_buffer(
A: T.Buffer[(8, 8, 16, 16), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi // 16, vj // 16, vi % 16, vj % 16] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def two_elementwise_transformed_output_buffer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 8, 16, 16), "float32"]
) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
            C[vi // 16, vj // 16, vi % 16, vj % 16] = B[vi, vj] + 1.0
@T.prim_func
def elementwise(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def elementwise_transformed(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
for i in range(16384):
with T.block("B"):
vi = T.axis.remap("S", [i])
            B[vi // 128, vi % 128] = A[vi // 128, vi % 128] * 2.0
@T.prim_func
def conv2d_nhwc(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
T.float32(0),
dtype="float32",
)
for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
with T.block("conv2d_nhwc"):
n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
with T.init():
Conv2d_nhwc[n, h, w, co] = T.float32(0)
Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
* Weight[rh, rw, rc, co]
)
@T.prim_func
def conv2d_nhwc_transformed(
Input: T.Buffer[(1, 224, 224, 3), "float32"],
Weight: T.Buffer[(7, 7, 3, 64), "float32"],
Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
with T.block("PadInput"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, |
i1, i2, i3])
T.reads(Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
T.float32(0),
dtype="float32",
)
for ax0, ax1, ax2 in T.grid(12544, 64, 147):
with T.block("conv2d_nhwc"):
v0, v1, v2 = T.axis.remap("SSR", [ax0, ax1, ax2])
            T.reads(
                PadInput[
                    v0 // 12544,
                    v0 % 12544 // 112 * 2 + v2 // 21,
                    v0 % 112 * 2 + v2 % 21 // 3,
                    v2 % 3,
                ],
                Weight[v2 // 21, v2 % 21 // 3, v2 % 3, v1],
            )
            T.writes(Conv2d_nhwc[v0 // 12544, v0 % 12544 // 112, v0 % 112, v1])
            with T.init():
                Conv2d_nhwc[v0 // 12544, v0 % 12544 // 112, v0 % 112, v1] = T.float32(0)
            Conv2d_nhwc[v0 // 12544, v0 % 12544 // 112, v0 % 112, v1] = Conv2d_nhwc[
                v0 // 12544, v0 % 12544 // 112, v0 % 112, v1
            ] + PadInput[
                v0 // 12544,
                v0 % 12544 // 112 * 2 + v2 // 21,
                v0 % 112 * 2 + v2 % 21 // 3,
                v2 % 3,
            ] * Weight[
                v2 // 21, v2 % 21 // 3, v2 % 3, v1
            ]
@T.prim_func
def two_elementwise_unit_dim(A: T.Buffer[(1, 128), "float32"], C: T.Buffer[(1, 128), "float32"]) -> None:
B = T.alloc_buffer((1, 128), "float32")
for i, j in T.grid(1, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_two_elementwise_transform_intermediate_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
block="B",
buffer="B",
index_map=packed_index_map_func,
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("write", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_intermediate_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_transform_input_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
index_map=packed_index_map_func, |
block="B",
buffer="A",
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("read", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_input_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_transform_output_buffer(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
if use_block_name:
sch.transform_layout(
index_map=packed_index_map_func,
block="C",
buffer="C",
)
else:
block = sch.get_block("C")
sch.transform_layout(block, ("write", 0), packed_index_map_func)
tvm.ir.assert_structural_equal(two_elementwise_transformed_output_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_unit_dim(use_block_name):
sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
index_map = lambda i, j: (i, j)
if use_block_name:
sch.transform_layout(
index_map=index_map,
block="B",
buffer="B",
)
else:
block = sch.get_block("B")
sch.transform_layout(block, ("write", 0), index_map)
tvm.ir.assert_structural_equal(two_elementwise_unit_dim, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_simplify():
sch = tir.Schedule(two_elementwise, debug_mask="all")
i, j = sch.get_loops(sch.get_block("C"))
i, i_inner = sch.split(i, factors=[None, 16])
j, j_inner = sch.split(j, factors=[None, 16])
sch.reorder(
i,
j,
i_inner,
j_inner,
)
block_outer = sch.blockize(i_inner)
B = sch.cache_read(block_outer, 0, "global")
    sch.transform_layout(B, ("write", 0), lambda i, j: (i // 16, j // 16, i % 16, j % 16))
@T.prim_func
def ref(B: T.Buffer[(8, 8, 16, 16), "float32"], C: T.Buffer[(128, 128), "float32"]):
for i_0, j_0 in T.grid(8, 8):
            with T.block("C_o"):
vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
T.reads(B[vi_o, vj_o, 0:16, 0:16])
T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
for i_1, j_1 in T.grid(16, 16):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i_1, j_1])
T.reads(B[vi_o, vj_o, vi, vj])
T.writes(C[vi_o * 16 + vi, vj_o * 16 + vj])
C[vi_o * 16 + vi, vj_o * 16 + vj] = B[vi_o, vj_o, vi, vj] + T.float32(1)
tvm.ir.assert_structural_equal(ref.body.block.body, sch.get(sch.get_loops(block_outer)[0]))
def test_var_args_sugar():
@T.prim_func
def summation_3d(
A: T.Buffer[(1024, 1024, 32), "float32"], B: T.Buffer[(1,), "float32"]
) -> None:
B[0] = 0
for i, j, k in T.grid(1024, 1024, 32):
with T.block("compute"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[0] = B[0] + A[vi, vj, vk]
@T.prim_func
def summation_3d_split(
A: T.Buffer[(1024, 1024, 8, 4), "float32"], B: T.Buffer[(1,), "float32"]
) -> None:
B[0] = 0
for i, j, k in T.grid(1024, 1024, 32):
with T.block("compute"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[0] = B[0] + A[vi, vj, vk // 4, vk % 4]
sch = tir.Schedule(summation_3d, debug_mask="all")
    sch.transform_layout(
        index_map=lambda *indices, k: [*indices, k // 4, k % 4], block="compute", buffer="A"
    )
tvm.ir.assert_structural_equal(summation_3d_split, sch.mod["main"])
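# The keyword-only `k` above is the variadic-index sugar under test: `*indices`
# forwards the leading axes unchanged while the trailing axis is split as
# k // 4, k % 4, turning shape (1024, 1024, 32) into (1024, 1024, 8, 4).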
def test_transform_block_layout_basic(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (i * 128 + j,))
tvm.ir.assert_structural_equal(elementwise_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_transform_block_layout_conv2d_nhwc(use_block_name):
sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
sch.transform_block_layout(
block,
lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 + h * 112 + w, co, rh * 7 * 3 + rw * 3 + rc),
)
tvm.ir.assert_structural_equal(conv2d_nhwc_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
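# Extent arithmetic for the mapping above (illustrative): the three spatial
# axes fuse to 1 * 112 * 112 == 12544 and the three reduction axes to
# 7 * 7 * 3 == 147, matching the T.grid(12544, 64, 147) nest in
# conv2d_nhwc_transformed.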
def test_transform_block_layout_unit_dim(use_block_name):
sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (j, i))
@T.prim_func
def two_elementwise_unit_dim_transformed(
A: T.Buffer[(1, 128), "float32"], C: T.Buffer[(1, 128), "float32"]
) -> None:
B = T.alloc_buffer((1, 128), "float32")
for j, i in T.grid(128, 1):
with T.block("B"):
vj, vi = T.axis.remap("SS", [j, i])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(1, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
tvm.ir.assert_structural_equal(two_elementwise_unit_dim_transformed, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_transform_block_layout_fail_non_affine(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tir.ScheduleError):
sch.transform_block_layout(block, lambda i, j: (i + j,))
def test_transform_block_layout_fail_mixed_iter_type(use_block_name):
sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
with pytest.raises(tir.ScheduleError):
sch.transform_block_layout(
block,
lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 |
+ h * 112 + w, co * 7 + rh, rw * 3 + rc),
)
def test_transform_block_layout_int64_extent(use_block_name):
@T.prim_func
def elementwise_int64_extent(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def elementwise_int64_extent_transformed(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i in range(T.int64(16384)):
with T.block("B"):
vi = T.axis.remap("S", [i])
                B[vi // T.int64(128), vi % T.int64(128)] = (
                    A[vi // T.int64(128), vi % T.int64(128)] * 2.0
                )
sch = tir.Schedule(elementwise_int64_extent, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
sch.transform_block_layout(block, lambda i, j: (i * 128 + j,))
print(
tvm.ir.base.get_first_structural_mismatch(
elementwise_int64_extent_transformed, sch.mod["main"]
)
)
tvm.ir.assert_structural_equal(elementwise_int64_extent_transformed, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise_int64_extent)
class BasePaddingCompare(tvm.testing.CompareBeforeAfter):
pad_value = tvm.testing.parameter(None)
transformed_buffer = tvm.testing.parameter("A")
@pytest.fixture
def transform(self, pad_value, transformed_buffer):
def transform(mod):
sch = tir.Schedule(mod)
sch.transform_layout(
"block", transformed_buffer, lambda i: [i
)
return sch.mod
        return transform
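# Each subclass below supplies `before`/`expected` (or an expected exception)
# together with class-level parameters; the fixture above then applies
# sch.transform_layout("block", <buffer>, lambda i: [i // 4, i % 4], pad_value=...),
# reshaping a buffer of extent 14 or 16 to (4, 4).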
class TestNoPadding(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value."""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
                A[vi // 4, vi % 4] = 0
class TestNoPaddingMultipleUsage(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value.
Like TestNoPadding, but the buffer A shows up in multiple
locations. To remain internally consistent, all instances of the
buffer should be rewritten.
"""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
B = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("other"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
vi = T.axis.remap("S", [i])
                A[vi // 4, vi % 4] = 0
B = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("other"):
vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestNoPaddingOpaqueBlock(BasePaddingCompare):
"""Transformations without padding do not depend on pad_value.
Like TestNoPadding, but buffer access is done in an opaque block.
"""
pad_value = tvm.testing.parameter(None, 42)
def before():
A = T.alloc_buffer(16, "int32")
for i in T.serial(16):
with T.block("block"):
A[i] = 0
def expected():
A = T.alloc_buffer([4, 4], "int32")
for i in T.serial(16):
with T.block("block"):
                A[i // 4, i % 4] = 0
class TestErrorIfPaddingForbidden(BasePaddingCompare):
"""Unless padding is explicitly enabled, should raise error"""
def before():
A = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
    expected = tvm.tir.schedule.schedule.ScheduleError
class TestErrorOnWrongPaddingType(BasePaddingCompare):
"""The padding must have the same dtype as the buffer"""
pad_value = tvm.testing.parameter(tir.IntImm("int8", 0))
def before():
A = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
A[vi] = 0
    expected = tvm.tir.schedule.schedule.ScheduleError
class TestPaddedTransformIfThenElse(BasePaddingCompare):
"""Use if_then_else to represent padding, if possible.
For a block that is a producer of the pre-transformation buffer,
which visits all indices according to a row-major traversal, and
which has no effect other than producing the transformed buffer,
transform the loop iterators to be a row-major traversal of the
post-transformation buffer, with padding represented by
`T.if_then_else`.
"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
dtype = tvm.testing.parameter("int32", "int8")
@tvm.testing.fixture
def before(self, dtype):
@T.prim_func
def func(A: T.Buffer[14, dtype]):
B = T.alloc_buffer(14, dtype)
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
return func
@tvm.testing.fixture
def expected(self, dtype, pad_value):
pad_value = tir.IntImm(dtype, pad_value)
@T.prim_func
def func(A: T.Buffer[14, dtype]):
B = T.alloc_buffer([4, 4], dtype)
for i, j in T.grid(4, 4):
with T.block("block"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = T.if_then_else(
vi == 3 and 2 <= vj, pad_value, A[vi * 4 + vj], dtype=dtype
)
        return func
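# Predicate sanity check (illustrative): padding extent 14 up to 4 * 4 == 16
# adds exactly the flat indices 14 and 15, i.e. the elements with vi == 3 and
# vj in {2, 3}, which is the `vi == 3 and 2 <= vj` condition in `expected`.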
class TestPaddedTransformWithoutLoop(BasePaddingCompare):
"""Handle padded writes without a loop
The statement being replaced may be something other than a
for-loop, such as if a loop has already been unrolled.
"""
pad_value = tvm.testing.parameter(0)
def before(A: T.Buffer[14, "int32"]):
with T.block("root"):
T.reads()
T.writes()
with T.block("block"):
A[0] = 0
def expected(A: T.Buffer[(4, 4), "int32"]):
with T.block("block"):
A[0, 0] = 0
for i, j in T.grid(4, 4):
with T.block("buffer_A_padding"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i == 3 and 2 <= j)
                A[vi, vj] = 0
class TestPaddedTransformIfThenElseReduction(BasePaddingCompare):
"""Like TestPaddedTransformIfThenElse, but with a reduction axis"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer(14, "int32")
for i, k in T.grid(14, 32):
with T.block("block"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0
B[vi] = B[vi] + A[vi, vk]
def expected(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer([4, 4], "int32")
for i, j, k in T.grid(4, 4, 32):
with T.block("block"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
B[vi, vj] = T.if_then_else(vi == 3 and 2 <= vj, 0, 0, dtype="int32")
B[vi, vj] = T.if_then_else(
vi == 3 and 2 <= vj, 0, B[vi, vj] + A[vi * 4 + vj, vk], dtype="int32"
                )
class TestPaddedTransformIfThenElseReductionOpaque(BasePaddingCompare):
"""Like TestPaddedTransformIfThenElseReduction, but with opaque blocks"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer(14, "int32")
for i in T.serial(14):
B[i] = 0
for k in T.serial(32):
with T.block("block"):
B[i] = B[i] + A[i, k]
def expected(A: T.Buffer[(14, 32), "int32"]):
B = T.alloc_buffer([4, 4], "int32")
for i, j in T.grid(4, 4):
B[i, j] = T.if_then_else(i == 3 and 2 <= j, 0, 0, dtype="int32")
for k in T.serial(32):
with T.block("block"):
B[i, j] = T.if_then_else(
i == 3 and 2 <= j, 0, B[i, j] + A[i * 4 + j, k], dtype="int32"
                    )
class TestPaddedTransformPostProcIfRequiredDueToSideEffects(BasePaddingCompare):
"""Set the transformation padding in a post-processing block.
Like TestPaddedTransformIfThenElse, but the block that produces B
also has the effect of setting `C`.
"""
pad_value = tvm.testing.parameter(0)
transformed_buffer = tvm.testing.parameter("B")
def before(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer(14, "int32")
C = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
B[vi] = A[vi]
C[vi] = 0
def expected(A: T.Buffer[14, "int32"]):
B = T.alloc_buffer([4, 4], "int32")
C = T.alloc_buffer(14, "int32")
for i in T.serial(14):
with T.block("block"):
vi = T.axis.remap("S", [i])
                B[vi // 4, vi % 4] = A[vi]
C[vi] = 0
for i, j in T.grid(4, 4):
with T.block("block_pad_B"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i == 3 and 2 <= j)
                B[vi, vj] = 0