text
stringlengths 1
2.05k
|
---|
c_buffer([1, 16, 231, 231], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 16, 231, 231):
with T.block("pad_temp"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
pad_temp[ax0, ax1, ax2, ax3] = T.if_then_else(
3 <= ax2 and ax2 < 228 and 3 <= ax3 and ax3 < 228,
x[ax0, ax1, ax2 - 3, ax3 - 3],
T.int8(0),
dtype="int8",
)
for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7):
with T.block("tensor"):
ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
with T.init():
tensor[ax0, ax1, ax2, ax3] = T.int8(0)
tensor[ax0, ax1, ax2, ax3] = (
tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1]
)
def test_decompose_hw_padding_direct():
    """Case 0. direct decompose.

    Decomposes the padding block of ``sum_pool_2d`` at the outermost loop,
    expecting a separate const-fill block followed by the in-bounds copy.
    (Reconstructed: the original text was split mid-token by extraction.)
    """

    @T.prim_func
    def pooling_decompose_0(
        x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
    ):
        pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
        # Fill the whole padded buffer with the pad constant first.
        for i0, i1, i2, i3 in T.grid(1, 16, 231, 231):
            with T.block("pad_temp_pad_const"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                pad_temp[ax0, ax1, ax2, ax3] = T.int8(0)
        # Then copy the in-bounds region of x into the interior.
        for i0, i1, i2, i3 in T.grid(1, 16, 225, 225):
            with T.block("pad_temp"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                pad_temp[ax0, ax1, ax2 + 3, ax3 + 3] = x[ax0, ax1, ax2, ax3]
        for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7):
            with T.block("tensor"):
                ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                with T.init():
                    tensor[ax0, ax1, ax2, ax3] = T.int8(0)
                tensor[ax0, ax1, ax2, ax3] = (
                    tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1]
                )

    sch = tir.Schedule(sum_pool_2d, debug_mask="all")
    pad = sch.get_block("pad_temp")
    sch.decompose_padding(pad, sch.get_loops(pad)[0])
    check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_0, check_run=True)
def test_decompose_hw_padding_tiled():
    """Case 1. tiling and then decompose.

    Tiles H and W by 75, computes the padding block at the tile loop, then
    decomposes padding inside the tile.  (Reconstructed: the original text
    was split mid-identifier by extraction.)
    """

    @T.prim_func
    def pooling_decompose_1(
        x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
    ) -> None:
        pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
        for i0, i2_0, i3_0 in T.grid(1, 3, 3):
            # Per-tile const fill of the padded region.
            for ax0, ax1, ax2 in T.grid(16, 81, 81):
                with T.block("pad_temp_pad_const"):
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.spatial(16, ax0)
                    ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1)
                    ax3 = T.axis.spatial(231, i3_0 * 75 + ax2)
                    T.reads()
                    T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
                    pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
            # Per-tile guarded copy of the in-bounds input.
            for ax0, ax1, ax2 in T.grid(16, 81, 81):
                with T.block("pad_temp"):
                    ax0_2 = T.axis.spatial(1, 0)
                    ax1_2 = T.axis.spatial(16, ax0)
                    ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3)
                    ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3)
                    T.where(
                        3 <= i2_0 * 75 + ax1
                        and i2_0 * 75 + ax1 < 228
                        and 3 <= i3_0 * 75 + ax2
                        and i3_0 * 75 + ax2 < 228
                    )
                    T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
                    T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
                    pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
            for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7):
                with T.block("tensor"):
                    ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
                    ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1)
                    ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
                    T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
                    with T.init():
                        tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
                    tensor[ax0_3, ax1_3, ax2_3, ax3] = (
                        tensor[ax0_3, ax1_3, ax2_3, ax3]
                        + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
                    )

    sch = tir.Schedule(sum_pool_2d, debug_mask="all")
    block = sch.get_block("tensor")
    pad = sch.get_block("pad_temp")
    n, c, h, w, kh, kw = sch.get_loops(block)
    ho, hi = sch.split(h, [3, 75])
    wo, wi = sch.split(w, [3, 75])
    sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
    sch.compute_at(sch.get_block("pad_temp"), wo)
    sch.decompose_padding(pad, sch.get_loops(pad)[3])
    check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_1, check_run=True)
def test_decompose_hw_padding_tiled_and_lift_pad():
    """Case 2. tiling and then decompose, lift const pad values to outer loop.

    Same tiling as case 1, but decompose at the outermost loop so the
    const-fill block is hoisted into its own fused loop nest.
    (Reconstructed: the original text was split mid-statement by extraction.)
    """

    @T.prim_func
    def pooling_decompose_2(
        x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
    ) -> None:
        pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
        # Const fill lifted out of the tile loop into one fused nest.
        for i0, i2_0, i3_0, ax0, ax1, ax2 in T.grid(1, 3, 3, 16, 81, 81):
            with T.block("pad_temp_pad_const"):
                ax0_1 = T.axis.spatial(1, 0)
                ax1_1 = T.axis.spatial(16, ax0)
                ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1)
                ax3 = T.axis.spatial(231, i3_0 * 75 + ax2)
                T.reads()
                T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
                pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
        for i0, i2_0, i3_0 in T.grid(1, 3, 3):
            for ax0, ax1, ax2 in T.grid(16, 81, 81):
                with T.block("pad_temp"):
                    ax0_2 = T.axis.spatial(1, 0)
                    ax1_2 = T.axis.spatial(16, ax0)
                    ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3)
                    ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3)
                    T.where(
                        3 <= i2_0 * 75 + ax1
                        and i2_0 * 75 + ax1 < 228
                        and 3 <= i3_0 * 75 + ax2
                        and i3_0 * 75 + ax2 < 228
                    )
                    T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
                    T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
                    pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
            for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7):
                with T.block("tensor"):
                    ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
                    ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1)
                    ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
                    T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
                    with T.init():
                        tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
                    tensor[ax0_3, ax1_3, ax2_3, ax3] = (
                        tensor[ax0_3, ax1_3, ax2_3, ax3]
                        + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
                    )

    sch = tir.Schedule(sum_pool_2d, debug_mask="all")
    block = sch.get_block("tensor")
    pad = sch.get_block("pad_temp")
    n, c, h, w, kh, kw = sch.get_loops(block)
    ho, hi = sch.split(h, [3, 75])
    wo, wi = sch.split(w, [3, 75])
    sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
    sch.compute_at(sch.get_block("pad_temp"), wo)
    sch.decompose_padding(pad, sch.get_loops(pad)[0])
    check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_2, check_run=True)
def test_decompose_hw_padding_non_perfect_tiled():
    """Case 3. non-perfect tiling and then decompose.

    Splits 225 by a factor of 80 (3 tiles of 80 overshoot the extent), so
    every decomposed block carries extra ``T.where`` bound guards.
    (Reconstructed: the original text was split mid-identifier by extraction.)
    """

    @T.prim_func
    def pooling_decompose_3(
        x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
    ) -> None:
        pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8")
        for i0, i2_0, i3_0 in T.grid(1, 3, 3):
            for ax0, ax1, ax2 in T.grid(16, 86, 86):
                with T.block("pad_temp_pad_const"):
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.spatial(16, ax0)
                    ax2_1 = T.axis.spatial(231, i2_0 * 80 + ax1)
                    ax3 = T.axis.spatial(231, i3_0 * 80 + ax2)
                    # Guard against the tile overshooting the padded extent.
                    T.where(i2_0 * 80 + ax1 < 231 and i3_0 * 80 + ax2 < 231)
                    T.reads()
                    T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3])
                    pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0)
            for ax0, ax1, ax2 in T.grid(16, 86, 86):
                with T.block("pad_temp"):
                    ax0_2 = T.axis.spatial(1, 0)
                    ax1_2 = T.axis.spatial(16, ax0)
                    ax2_2 = T.axis.spatial(225, i2_0 * 80 + ax1 - 3)
                    ax3 = T.axis.spatial(225, i3_0 * 80 + ax2 - 3)
                    T.where(
                        3 <= i2_0 * 80 + ax1
                        and i2_0 * 80 + ax1 < 228
                        and 3 <= i3_0 * 80 + ax2
                        and i3_0 * 80 + ax2 < 228
                        and i2_0 * 80 + ax1 < 231
                        and i3_0 * 80 + ax2 < 231
                    )
                    T.reads(x[ax0_2, ax1_2, ax2_2, ax3])
                    T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3])
                    pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3]
            for i1, i2_1, i3_1, i4, i5 in T.grid(16, 80, 80, 7, 7):
                with T.block("tensor"):
                    ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1])
                    ax2_3 = T.axis.spatial(225, i2_0 * 80 + i2_1)
                    ax3 = T.axis.spatial(225, i3_0 * 80 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.where(i2_0 * 80 + i2_1 < 225 and i3_0 * 80 + i3_1 < 225)
                    T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1])
                    T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3])
                    with T.init():
                        tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0)
                    tensor[ax0_3, ax1_3, ax2_3, ax3] = (
                        tensor[ax0_3, ax1_3, ax2_3, ax3]
                        + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]
                    )

    sch = tir.Schedule(sum_pool_2d, debug_mask="all")
    block = sch.get_block("tensor")
    pad = sch.get_block("pad_temp")
    n, c, h, w, kh, kw = sch.get_loops(block)
    ho, hi = sch.split(h, [None, 80])
    wo, wi = sch.split(w, [None, 80])
    sch.reorder(n, ho, wo, c, hi, wi, kh, kw)
    sch.compute_at(sch.get_block("pad_temp"), wo)
    sch.decompose_padding(pad, sch.get_loops(pad)[3])
    check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_3, check_run=True)
# Entry point when run as a script; `tvm.testing.main()` dispatches to pytest.
# (Fixed: trailing extraction residue made the line a syntax error.)
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 128x128 matmul fixture with a separate "init" block and a reduction
    # "update" block; note the update reads B[vj, vk] (B accessed transposed).
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_tir_schedule_error_detail():
    """`detail` render level reports the offending block name in the error."""
    schedule = tir.Schedule(matmul, debug_mask="all", error_render_level="detail")
    with pytest.raises(tir.ScheduleError) as err:
        schedule.get_block("wrong_name")
    (message,) = err.value.args
    assert "Cannot find a block with the name: wrong_name" in message
def test_tir_schedule_error_fast():
    """`fast` render level produces a generic message without the block name."""
    schedule = tir.Schedule(matmul, debug_mask="all", error_render_level="fast")
    with pytest.raises(tir.ScheduleError) as err:
        schedule.get_block("wrong_name")
    (message,) = err.value.args
    assert "Cannot find a block with the specified name" in message
def test_tir_schedule_error_none():
    """`none` render level leaves the error message unrendered."""
    schedule = tir.Schedule(matmul, debug_mask="all", error_render_level="none")
    with pytest.raises(tir.ScheduleError) as err:
        schedule.get_block("wrong_name")
    (message,) = err.value.args
    assert "(not rendered)" in message
def test_tir_schedule_attribute_error():
    """Accessing an undefined schedule member raises AttributeError."""
    schedule = tir.Schedule(matmul)
    with pytest.raises(AttributeError):
        schedule.non_existent_field()
# Entry point when run as a script; `tvm.testing.main()` dispatches to pytest.
# (Fixed: trailing extraction residue made the line a syntax error.)
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def element_wise(a: T.handle, b: T.handle) -> None:
    # Baseline fixture: B = A * 2 over a 128x128 grid, all loops serial.
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_parallelized(a: T.handle, b: T.handle) -> None:
    # Expected IR after `parallel` on the outer loop of element_wise.
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i0 in T.parallel(0, 128):
        for i1 in T.serial(0, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i0, i1])
                B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_i_bound(a: T.handle, b: T.handle) -> None:
    # Expected IR after binding element_wise's outer loop to threadIdx.x.
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i0 in T.thread_binding(0, 128, thread="threadIdx.x"):
        for i1 in T.serial(0, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i0, i1])
                B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_compute_at_split(a: T.handle, c: T.handle) -> None:
    # Fixture with producer B computed-at under i and consumer C's j loop
    # pre-split into (32, 4); used as input for vectorize/bind tests.
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    for i in T.serial(0, 128):
        for j0 in T.serial(0, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j0])
                B[vi, vj] = A[vi, vj] * 2.0
        for j1o, j1i in T.grid(32, 4):
            with T.block("C"):
                vi = T.axis.S(128, i)
                vj = T.axis.S(128, j1o * 4 + j1i)
                C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_compute_at_split_vectorized(a: T.handle, c: T.handle) -> None:
    # Expected IR after vectorizing the inner j1i loop of
    # element_wise_compute_at_split.
    # (Reconstructed: the literal 2.0 was split across lines by extraction.)
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    for i in T.serial(0, 128):
        for j0 in T.serial(0, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j0])
                B[vi, vj] = A[vi, vj] * 2.0
        for j1o in T.serial(0, 32):
            for j1i in T.vectorized(0, 4):
                with T.block("C"):
                    vi = T.axis.S(128, i)
                    vj = T.axis.S(128, j1o * 4 + j1i)
                    C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_split_predicate(a: T.handle, b: T.handle) -> None:
    # Fixture whose j loop is split 13x10 over extent 128, so the block
    # carries a `T.where` predicate for the overshoot.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    for i, j_0, j_1 in T.grid(128, 13, 10):
        with T.block("B"):
            T.where(j_0 * 10 + j_1 < 128)
            vi = T.axis.S(128, i)
            vj = T.axis.S(128, j_0 * 10 + j_1)
            B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_split_predicate_parallelized(a: T.handle, b: T.handle) -> None:
    # Expected IR after `parallel` on j_0 of element_wise_split_predicate.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    for i in T.serial(0, 128):
        for j_0 in T.parallel(0, 13):
            for j_1 in T.serial(0, 10):
                with T.block("B"):
                    T.where(j_0 * 10 + j_1 < 128)
                    vi = T.axis.S(128, i)
                    vj = T.axis.S(128, j_0 * 10 + j_1)
                    B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_split_predicate_vectorized(a: T.handle, b: T.handle) -> None:
    # Expected IR after `vectorize` on i of element_wise_split_predicate.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    for i in T.vectorized(0, 128):
        for j_0, j_1 in T.grid(13, 10):
            with T.block("B"):
                T.where(j_0 * 10 + j_1 < 128)
                vi = T.axis.S(128, i)
                vj = T.axis.S(128, j_0 * 10 + j_1)
                B[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def element_wise_compute_at_split_j0_j1o_bound(a: T.handle, c: T.handle) -> None:
    # Expected IR after binding both j0 and j1o of
    # element_wise_compute_at_split to threadIdx.x.
    # (Reconstructed: the literal 2.0 was split across lines by extraction.)
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    for i in T.serial(0, 128):
        for j0 in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j0])
                B[vi, vj] = A[vi, vj] * 2.0
        for j1o in T.thread_binding(0, 32, thread="threadIdx.x"):
            for j1i in T.serial(0, 4):
                with T.block("C"):
                    vi = T.axis.S(128, i)
                    vj = T.axis.S(128, j1o * 4 + j1i)
                    C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 128x128 matmul fixture with fused init (T.init) inside block "C";
    # the update reads B[vj, vk] (B accessed transposed).
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def rowsum(a: T.handle, b: T.handle) -> None:
    # Row-sum reduction fixture: B[i] = sum_k A[i, k].
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_unrolled(a: T.handle, b: T.handle) -> None:
    # Expected IR after `unroll` on rowsum's outer loop.
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i0 in T.unroll(0, 128):
        for i1 in T.serial(0, 128):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i0, i1])
                with T.init():
                    B[vi] = 0.0
                B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None:
    # Negative fixture: the reduction binding floordiv(k*k, 2) is not
    # quasi-affine, so parallelization must be rejected.
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 16):
        with T.block("B"):
            vi = T.axis.S(128, i)
            vk = T.axis.R(128, T.floordiv(k * k, 2))
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def rowsum_not_compact_data_flow(a: T.handle, b: T.handle) -> None:
    # Negative fixture: B is indexed by the reduction var vk while i is the
    # spatial loop, so the data flow is not compact and `parallel(i)` must
    # be rejected.  (Reconstructed: `with T.block` was split by extraction.)
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 16):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vk] = 0.0
            B[vk] = B[vk] + A[vi, vk]
@T.prim_func
def rowsum_cross_thread_reduction(a: T.handle, b: T.handle) -> None:
    # Expected IR after binding rowsum's reduction loop to threadIdx.x
    # (cross-thread reduction is allowed for threadIdx binding only).
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i0 in T.serial(0, 128):
        for i1 in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i0, i1])
                with T.init():
                    B[vi] = 0.0
                B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def opaque_block(a: T.handle) -> None:
    # Negative fixture: a block with no iter vars (opaque) and a loop-carried
    # dependence A[i+1] += A[i]; vectorizing its loop must be rejected.
    A = T.match_buffer(a, (16,))
    for i in T.serial(0, 15):
        with T.block("opaque"):
            A[i + 1] = A[i + 1] + A[i]
@T.prim_func
def block_inside_init(a: T.handle, b: T.handle) -> None:
    # Fixture where the T.init branch itself contains a nested "init" block;
    # used to test binding the outer loop when init has block structure.
    A = T.match_buffer(a, [128, 128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    for i in T.serial(0, 128):
        with T.block("outer"):
            vi = T.axis.S(128, i)
            with T.init():
                for j in T.serial(0, 128):
                    with T.block("init"):
                        vj = T.axis.S(128, j)
                        B[vi, vj] = 0.0
            for k in T.serial(0, 128):
                for j in T.serial(0, 128):
                    with T.block("inner"):
                        vj, vk = T.axis.remap("SR", [j, k])
                        B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
@T.prim_func
def thread_bound_block_inside_init(a: T.handle, b: T.handle) -> None:
    # Expected IR after binding block_inside_init's outer loop to threadIdx.x.
    # (Reconstructed: trailing extraction residue removed from the init store.)
    A = T.match_buffer(a, [128, 128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    for i in T.thread_binding(0, 128, thread="threadIdx.x"):
        with T.block("outer"):
            vi = T.axis.S(128, i)
            with T.init():
                for j in T.serial(0, 128):
                    with T.block("init"):
                        vj = T.axis.S(128, j)
                        B[vi, vj] = 0.0
            for k in T.serial(0, 128):
                for j in T.serial(0, 128):
                    with T.block("inner"):
                        vj, vk = T.axis.remap("SR", [j, k])
                        B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
@T.prim_func
def decomposed_gemm(
    A: T.Buffer[(16, 16), "float32"],
    B: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(16, 16), "float32"],
):
    # 16x16 GEMM tiled 4x4, already decomposed into separate init / update /
    # write-back blocks over a `local` accumulator.
    local = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(4, 4):
        for ii, jj in T.grid(4, 4):
            with T.block("init"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                local[vi, vj] = 0
        for k, ii, jj in T.grid(16, 4, 4):
            with T.block("update"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                vk = T.axis.R(16, k)
                local[vi, vj] += A[vi, vk] * B[vj, vk]
        for ii, jj in T.grid(4, 4):
            with T.block("C"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                C[vi, vj] = local[vi, vj]
@T.prim_func
def decomposed_gemm_after_vectorize(
    A: T.Buffer[(16, 16), "float32"],
    B: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(16, 16), "float32"],
):
    # Expected IR after vectorizing the jj loop of decomposed_gemm's
    # write-back block "C".
    # (Reconstructed: trailing extraction residue removed from the vj binding.)
    local = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(4, 4):
        for ii, jj in T.grid(4, 4):
            with T.block("init"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                local[vi, vj] = 0
        for k, ii, jj in T.grid(16, 4, 4):
            with T.block("update"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                vk = T.axis.R(16, k)
                local[vi, vj] += A[vi, vk] * B[vj, vk]
        for ii in range(4):
            for jj in T.vectorized(4):
                with T.block("C"):
                    vi = T.axis.S(16, i * 4 + ii)
                    vj = T.axis.S(16, j * 4 + jj)
                    C[vi, vj] = local[vi, vj]
@T.prim_func
def nested_block_bind(
    A: T.Buffer[(16, 16, 16, 16), "float32"], B: T.Buffer[(16, 16, 16), "float32"]
):
    # Fixture with a reduction block nested inside an outer spatial block;
    # used to test binding loops at both nesting levels.
    for i, j in T.grid(16, 16):
        with T.block("outer"):
            vi, vj = T.axis.remap("SS", [i, j])
            for k, l in T.grid(16, 16):
                with T.block("inner"):
                    vk, vl = T.axis.remap("SR", [k, l])
                    with T.init():
                        B[vi, vj, vk] = 0.0
                    B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
@T.prim_func
def thread_bound_nested_block(
    A: T.Buffer[(16, 16, 16, 16), "float32"], B: T.Buffer[(16, 16, 16), "float32"]
) -> None:
    # Expected IR after binding j -> blockIdx.x and l -> threadIdx.x in
    # nested_block_bind.
    for i in T.serial(16):
        for j in T.thread_binding(16, thread="blockIdx.x"):
            with T.block("outer"):
                vi, vj = T.axis.remap("SS", [i, j])
                for k in T.serial(16):
                    for l in T.thread_binding(16, thread="threadIdx.x"):
                        with T.block("inner"):
                            vk, vl = T.axis.remap("SR", [k, l])
                            with T.init():
                                B[vi, vj, vk] = T.float32(0)
                            B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
@T.prim_func
def nested_block_bind_after_cache_read(
    A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
    # Fixture with a shared-scope cache-read block and a reduction nested
    # inside the outer block; used to test binding both nesting levels.
    # (Reconstructed: the final update statement was split by extraction.)
    for i in T.serial(16):
        with T.block("outer"):
            vi = T.axis.spatial(16, i)
            A_shared = T.alloc_buffer([1, 16], dtype="float32", scope="shared")
            for ax0, ax1 in T.grid(1, 16):
                with T.block("A_shared"):
                    v0 = T.axis.spatial(16, vi + ax0)
                    v1 = T.axis.spatial(16, ax1)
                    A_shared[v0, v1] = A[v0, v1]
            for j in T.serial(16):
                with T.block("inner"):
                    vj = T.axis.reduce(16, j)
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + A_shared[vi, vj]
@T.prim_func
def thread_bound_nested_block_after_cache_read(
    A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
    # Expected IR after binding i -> blockIdx.x and j -> threadIdx.x in
    # nested_block_bind_after_cache_read.
    for i in T.thread_binding(16, thread="blockIdx.x"):
        with T.block("outer"):
            vi = T.axis.spatial(16, i)
            A_shared = T.alloc_buffer([1, 16], dtype="float32", scope="shared")
            for ax0, ax1 in T.grid(1, 16):
                with T.block("A_shared"):
                    v0 = T.axis.spatial(16, vi + ax0)
                    v1 = T.axis.spatial(16, ax1)
                    A_shared[v0, v1] = A[v0, v1]
            for j in T.thread_binding(16, thread="threadIdx.x"):
                with T.block("inner"):
                    vj = T.axis.reduce(16, j)
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + A_shared[vi, vj]
@T.prim_func
def decomposed_gemm_parallelize_init(
    A: T.Buffer[(16, 16), "float32"],
    B: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(16, 16), "float32"],
) -> None:
    # Expected IR after vectorizing the jj loop of decomposed_gemm's "init"
    # block; explicit T.reads/T.writes annotations are present.
    # (Reconstructed: the vj binding in block "C" was split by extraction.)
    local = T.alloc_buffer([16, 16], dtype="float32")
    for i, j in T.grid(4, 4):
        for ii in T.serial(4):
            for jj in T.vectorized(4):
                with T.block("init"):
                    vi = T.axis.spatial(16, i * 4 + ii)
                    vj = T.axis.spatial(16, j * 4 + jj)
                    T.reads()
                    T.writes(local[vi, vj])
                    local[vi, vj] = 0
        for k, ii, jj in T.grid(16, 4, 4):
            with T.block("update"):
                vi = T.axis.spatial(16, i * 4 + ii)
                vj = T.axis.spatial(16, j * 4 + jj)
                vk = T.axis.reduce(16, k)
                T.reads(local[vi, vj], A[vi, vk], B[vj, vk])
                T.writes(local[vi, vj])
                local[vi, vj] = local[vi, vj] + A[vi, vk] * B[vj, vk]
        for ii, jj in T.grid(4, 4):
            with T.block("C"):
                vi = T.axis.spatial(16, i * 4 + ii)
                vj = T.axis.spatial(16, j * 4 + jj)
                T.reads(local[vi, vj])
                T.writes(C[vi, vj])
                C[vi, vj] = local[vi, vj]
@T.prim_func
def scatter_compute(A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]):
    # Fixture where two loops each cover a disjoint half of B's domain
    # (a scatter); both halves can be parallelized independently.
    for i in T.grid(8):
        with T.block("first_half"):
            vi = T.axis.spatial(16, 8 + i)
            B[vi] = A[vi - 8]
    for i in T.grid(8):
        with T.block("last_half"):
            vi = T.axis.spatial(16, i)
            B[vi] = A[vi + 8]
@T.prim_func
def scatter_compute_parallelize(
    A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]
) -> None:
    # Expected IR after parallelizing both loops of scatter_compute.
    for i in T.parallel(8):
        with T.block("first_half"):
            vi = T.axis.spatial(16, 8 + i)
            T.reads(A[vi - 8])
            T.writes(B[vi])
            B[vi] = A[vi - 8]
    for i in T.parallel(8):
        with T.block("last_half"):
            vi = T.axis.spatial(16, i)
            T.reads(A[vi + 8])
            T.writes(B[vi])
            B[vi] = A[vi + 8]
def test_parallel():
    """Parallelizing element_wise's outer loop yields element_wise_parallelized."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.parallel(outer_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_parallelized)
    verify_trace_roundtrip(sch, mod=element_wise)
def test_parallel_predicate():
    """A loop under a block predicate can still be parallelized."""
    sch = tir.Schedule(element_wise_split_predicate, debug_mask="all")
    j_outer = sch.get_loops(sch.get_block("B"))[1]
    sch.parallel(j_outer)
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_split_predicate_parallelized)
    verify_trace_roundtrip(sch, mod=element_wise_split_predicate)
def test_parallel_reduction_block_iter():
    """Parallelizing a reduction loop of matmul is rejected."""
    sch = tir.Schedule(matmul, debug_mask="all")
    k_loop = sch.get_loops(sch.get_block("C"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.parallel(k_loop)
def test_parallel_not_quasi_affine():
    """Parallelization is rejected when a block binding is not quasi-affine."""
    sch = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.parallel(outer_loop)
def test_parallel_not_compact_data_flow():
    """Parallelization is rejected when the block's data flow is not compact.

    (Reconstructed: the function name was split across lines by extraction.)
    """
    s = tir.Schedule(rowsum_not_compact_data_flow, debug_mask="all")
    i, _ = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.parallel(i)
def test_vectorize():
    """Vectorizing the innermost consumer loop matches the expected IR."""
    sch = tir.Schedule(element_wise_compute_at_split, debug_mask="all")
    inner_loop = sch.get_loops(sch.get_block("C"))[-1]
    sch.vectorize(inner_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_compute_at_split_vectorized)
    verify_trace_roundtrip(sch, mod=element_wise_compute_at_split)
def test_vectorize_predicate():
    """A loop above a predicated block can still be vectorized."""
    sch = tir.Schedule(element_wise_split_predicate, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.vectorize(outer_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_split_predicate_vectorized)
    verify_trace_roundtrip(sch, mod=element_wise_split_predicate)
def test_vectorize_opaque_block():
    """Vectorizing the loop of an opaque (no-iter-var) block is rejected."""
    sch = tir.Schedule(opaque_block, debug_mask="all")
    (loop,) = sch.get_loops(sch.get_block("opaque"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.vectorize(loop)
def test_unroll():
    """Unrolling rowsum's outer loop yields rowsum_unrolled."""
    sch = tir.Schedule(rowsum, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.unroll(outer_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], rowsum_unrolled)
    verify_trace_roundtrip(sch, mod=rowsum)
def test_unroll_after_bind():
    """Unrolling a loop replaces an earlier thread binding on the same loop."""
    sch = tir.Schedule(rowsum, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.bind(outer_loop, "blockIdx.x")
    sch.unroll(outer_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], rowsum_unrolled)
    verify_trace_roundtrip(sch, mod=rowsum)
def test_bind1():
    """Binding element_wise's outer loop to threadIdx.x matches the fixture."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.bind(outer_loop, "threadIdx.x")
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_i_bound)
    verify_trace_roundtrip(sch, mod=element_wise)
def test_bind2():
    """Bind producer and consumer loops to the same threadIdx.x.

    (Reconstructed: the `tvm.ir.assert_structural_equal` call was split
    across lines by extraction.)
    """
    s = tir.Schedule(element_wise_compute_at_split, debug_mask="all")
    _, j0 = s.get_loops(s.get_block("B"))
    _, j1o, _ = s.get_loops(s.get_block("C"))
    s.bind(j0, "threadIdx.x")
    s.bind(j1o, "threadIdx.x")
    tvm.ir.assert_structural_equal(s.mod["main"], element_wise_compute_at_split_j0_j1o_bound)
    verify_trace_roundtrip(s, mod=element_wise_compute_at_split)
def test_bind_cross_thread_reduction():
    """Binding a reduction loop to threadIdx.x is allowed (cross-thread reduce)."""
    sch = tir.Schedule(rowsum, debug_mask="all")
    reduce_loop = sch.get_loops(sch.get_block("B"))[-1]
    sch.bind(reduce_loop, "threadIdx.x")
    tvm.ir.assert_structural_equal(sch.mod["main"], rowsum_cross_thread_reduction)
    verify_trace_roundtrip(sch, mod=rowsum)
def test_bind_not_cross_thread_reduction():
    """Binding a reduction loop to blockIdx.x is rejected."""
    sch = tir.Schedule(rowsum, debug_mask="all")
    reduce_loop = sch.get_loops(sch.get_block("B"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.bind(reduce_loop, "blockIdx.x")
def test_bind_after_bind():
    """A second bind on the same loop overrides the first one."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    outer_loop = sch.get_loops(sch.get_block("B"))[0]
    sch.bind(outer_loop, "blockIdx.x")
    sch.bind(outer_loop, "threadIdx.x")
    tvm.ir.assert_structural_equal(sch.mod["main"], element_wise_i_bound)
    verify_trace_roundtrip(sch, mod=element_wise)
def test_block_inside_init():
    """Binding works on a loop whose block has a block-structured T.init."""
    sch = tir.Schedule(block_inside_init, debug_mask="all")
    (outer_loop,) = sch.get_loops(sch.get_block("outer"))
    sch.bind(outer_loop, "threadIdx.x")
    tvm.ir.assert_structural_equal(sch.mod["main"], thread_bound_block_inside_init)
    verify_trace_roundtrip(sch, mod=block_inside_init)
def test_vectorize_after_decompose():
    """The write-back block of an already-decomposed GEMM can be vectorized."""
    sch = tir.Schedule(decomposed_gemm, debug_mask="all")
    innermost = sch.get_loops(sch.get_block("C"))[-1]
    sch.vectorize(innermost)
    tvm.ir.assert_structural_equal(sch.mod["main"], decomposed_gemm_after_vectorize)
    verify_trace_roundtrip(sch, mod=decomposed_gemm)
def test_nested_block_bind():
    """Loops of nested blocks can each be bound to a thread axis."""
    sch = tir.Schedule(nested_block_bind)
    outer = sch.get_block("outer")
    inner = sch.get_block("inner")
    j_loop = sch.get_loops(outer)[1]
    l_loop = sch.get_loops(inner)[1]
    sch.bind(l_loop, "threadIdx.x")
    sch.bind(j_loop, "blockIdx.x")
    tvm.ir.assert_structural_equal(sch.mod["main"], thread_bound_nested_block)
    verify_trace_roundtrip(sch, mod=nested_block_bind)
# NOTE(review): "nexted" looks like a typo for "nested", but the name is the
# test's public identifier, so it is kept unchanged.
def test_nexted_block_bind_after_cache_read():
    """Bind loops of nested blocks when the outer block holds a cache read.

    (Reconstructed: the fixture identifier was split across lines by
    extraction.)
    """
    s = tir.Schedule(nested_block_bind_after_cache_read)
    block_outer = s.get_block("outer")
    block_inner = s.get_block("inner")
    (i,) = s.get_loops(block_outer)
    (j,) = s.get_loops(block_inner)
    s.bind(i, "blockIdx.x")
    s.bind(j, "threadIdx.x")
    tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_nested_block_after_cache_read)
    verify_trace_roundtrip(s, mod=nested_block_bind_after_cache_read)
def test_vectorize_init():
    """Vectorizing the innermost loop of the decomposed init block."""
    sch = tir.Schedule(decomposed_gemm, debug_mask="all")
    init_block = sch.get_block("init")
    update_block = sch.get_block("update")
    init_loops = sch.get_loops(init_block)
    # Kept so the recorded trace matches the original schedule exactly.
    update_loops = sch.get_loops(update_block)
    sch.vectorize(init_loops[-1])
    tvm.ir.assert_structural_equal(sch.mod["main"], decomposed_gemm_parallelize_init)
    verify_trace_roundtrip(sch, mod=decomposed_gemm)
def test_scatter_parallelize():
    """Both disjoint scatter loops can be parallelized independently."""
    sch = tir.Schedule(scatter_compute, debug_mask="all")
    for block_name in ("first_half", "last_half"):
        (loop,) = sch.get_loops(sch.get_block(block_name))
        sch.parallel(loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], scatter_compute_parallelize)
    verify_trace_roundtrip(sch, mod=scatter_compute)
# Entry point when run as a script; `tvm.testing.main()` dispatches to pytest.
# (Fixed: trailing extraction residue made the line a syntax error.)
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm.testing
from tvm.tir.schedule |
import BlockRV, Instruction, InstructionKind, LoopRV
def test_inst_kind_get():
    """Looking up an InstructionKind by name exposes its attributes."""
    kind = InstructionKind.get("EnterPostproc")
    assert kind.name == "EnterPostproc"
    assert not kind.is_pure
def test_inst_construct_1():
    """Construct a GetLoops instruction and verify its fields round-trip."""
    block_rv = BlockRV()
    loops = [LoopRV(), LoopRV()]
    inst = Instruction(
        kind=InstructionKind.get("GetLoops"),
        inputs=[block_rv],
        attrs=[],
        outputs=loops,
    )
    assert str(inst) == "_, _ = sch.get_loops(block=_)"
    assert (len(inst.inputs), len(inst.attrs), len(inst.outputs)) == (1, 0, 2)
    assert inst.kind.same_as(InstructionKind.get("GetLoops"))
    assert inst.inputs[0].same_as(block_rv)
    for produced, expected in zip(inst.outputs, loops):
        assert produced.same_as(expected)
def test_inst_construct_2():
    """Construct a ComputeInline instruction (no outputs) and verify fields."""
    block_rv = BlockRV()
    inst = Instruction(
        kind=InstructionKind.get("ComputeInline"),
        inputs=[block_rv],
        attrs=[],
        outputs=[],
    )
    assert str(inst) == "sch.compute_inline(block=_)"
    assert (len(inst.inputs), len(inst.attrs), len(inst.outputs)) == (1, 0, 0)
    assert inst.kind.same_as(InstructionKind.get("ComputeInline"))
    assert inst.inputs[0].same_as(block_rv)
# Entry point when run as a script; `tvm.testing.main()` dispatches to pytest.
# (Fixed: trailing extraction residue made the line a syntax error.)
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir, te
from tvm.script |
import tir as T
from tvm.tir.schedule.schedule |
import ScheduleError
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
from tvm.meta_schedule.testing |
import te_workload
@T.prim_func
def matmul_before(
    A: T.Buffer[(128, 127), "float32"],
    B: T.Buffer[(127, 127), "float32"],
    C: T.Buffer[(128, 127), "float32"],
) -> None:
    # pad_einsum fixture: matmul with non-power-of-two extents (127) staged
    # through shared-scope intermediate buffers.
    A_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
    B_shared = T.alloc_buffer((127, 127), "float32", scope="shared")
    C_shared = T.alloc_buffer((128, 127), "float32", scope="shared")
    for i0, i1 in T.grid(128, 127):
        with T.block("A"):
            i, j = T.axis.remap("SS", [i0, i1])
            A_shared[i, j] = A[i, j]
    for i0, i1 in T.grid(127, 127):
        with T.block("B"):
            i, j = T.axis.remap("SS", [i0, i1])
            B_shared[i, j] = B[i, j]
    for i0, i1, i2 in T.grid(128, 127, 127):
        with T.block("C_shared"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            with T.init():
                C_shared[i, j] = T.float32(0)
            C_shared[i, j] = C_shared[i, j] + A_shared[i, k] * B_shared[k, j]
    for i0, i1 in T.grid(128, 127):
        with T.block("C"):
            i, j = T.axis.remap("SS", [i0, i1])
            C[i, j] = C_shared[i, j]
@T.prim_func
def matmul_expected(
    A: T.Buffer[(128, 127), "float32"],
    B: T.Buffer[(127, 127), "float32"],
    C: T.Buffer[(128, 127), "float32"],
) -> None:
    # Expected IR after pad_einsum([0, 1, 1]) on matmul_before: the shared
    # buffers are padded from 127 to 128 and loads are guarded by
    # if_then_else; the final write-back stays at the original 127 extent.
    # (Reconstructed: a T.if_then_else call was split mid-name by extraction.)
    A_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    B_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    C_shared_padded = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    for i0, i1 in T.grid(128, 128):
        with T.block("A"):
            i, j = T.axis.remap("SS", [i0, i1])
            T.reads(A[i, j])
            T.writes(A_shared_padded[i, j])
            A_shared_padded[i, j] = T.if_then_else(j < 127, A[i, j], T.float32(0), dtype="float32")
    for i0, i1 in T.grid(128, 128):
        with T.block("B"):
            i, j = T.axis.remap("SS", [i0, i1])
            T.reads(B[i, j])
            T.writes(B_shared_padded[i, j])
            B_shared_padded[i, j] = T.if_then_else(
                i < 127 and j < 127, B[i, j], T.float32(0), dtype="float32"
            )
    for i0, i1, i2 in T.grid(128, 128, 128):
        with T.block("C_shared"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(A_shared_padded[i, k], B_shared_padded[k, j])
            T.writes(C_shared_padded[i, j])
            with T.init():
                C_shared_padded[i, j] = T.float32(0)
            C_shared_padded[i, j] = (
                C_shared_padded[i, j] + A_shared_padded[i, k] * B_shared_padded[k, j]
            )
    for i0, i1 in T.grid(128, 127):
        with T.block("C"):
            i, j = T.axis.remap("SS", [i0, i1])
            T.reads(C_shared_padded[i, j])
            T.writes(C[i, j])
            C[i, j] = C_shared_padded[i, j]
def test_pad_matmul():
    """pad_einsum pads the 127-sized axes up to 128 as in matmul_expected."""
    sch = tir.Schedule(matmul_before, debug_mask="all")
    compute_block = sch.get_block("C_shared")
    sch.pad_einsum(compute_block, [0, 1, 1])
    tvm.ir.assert_structural_equal(matmul_expected, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul_before)
def test_pad_matmul_error_non_intermediate_buffer():
    """pad_einsum is rejected when the block writes a non-intermediate buffer."""
    workload = te.create_prim_func(te_workload.matmul(128, 127, 127))
    sch = tir.Schedule(workload, debug_mask="all")
    compute_block = sch.get_block("C")
    with pytest.raises(ScheduleError):
        sch.pad_einsum(compute_block, [0, 1, 1])
# Entry point when run as a script; `tvm.testing.main()` dispatches to pytest.
# (Fixed: trailing extraction residue made the line a syntax error.)
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def rowsum_blockized(a: T.handle, b: T.handle) -> None:
    # Blockized row-sum fixture: outer block "blockized_B" carries the
    # reduction over ko, with a block-structured init over i1.
    B = T.match_buffer(b, [32, 4])
    A = T.match_buffer(a, [32, 4, 128])
    for i0, i2_0 in T.grid(32, 16):
        with T.block("blockized_B"):
            io, ko = T.axis.remap("SR", [i0, i2_0])
            with T.init():
                for i1 in T.serial(0, 4):
                    with T.block("B_init"):
                        ii_init = T.axis.S(4, i1)
                        B[io, ii_init] = 0.0
            for i1_1, i2_1 in T.grid(4, 8):
                with T.block("B"):
                    ii = T.axis.S(4, i1_1)
                    k = T.axis.R(128, ko * 8 + i2_1)
                    B[io, ii] = B[io, ii] + A[io, ii, k]
# 128x128x128 matmul with a fused init: input workload for the
# decompose_reduction tests below.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of decompose_reduction(matmul, loop i): the init becomes a
# standalone "init" block ahead of the reduction-only "update" block.
@T.prim_func
def matmul_decompose0(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of decomposing rowsum_blockized at its outer spatial loop:
# the nested init loop is hoisted into its own "blockized_B_init" scope.
@T.prim_func
def matmul_decompose1(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [32, 4, 128], elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [32, 4], elem_offset=0, align=64, offset_factor=1)
    for i0 in T.serial(0, 32):
        with T.block("blockized_B_init"):
            io = T.axis.S(32, i0)
            for i1 in T.serial(0, 4):
                with T.block("B_init"):
                    ii = T.axis.S(4, i1)
                    B[io, ii] = T.float32(0)
    for i0, i2_o in T.grid(32, 16):
        with T.block("blockized_B_update"):
            io, ko = T.axis.remap("SR", [i0, i2_o])
            for i1, i2_i in T.grid(4, 8):
                with T.block("B"):
                    ii = T.axis.S(4, i1)
                    k = T.axis.R(128, ko * 8 + i2_i)
                    B[io, ii] = B[io, ii] + A[io, ii, k]
# Expected result of decompose_reduction(matmul, loop k): init hoisted just
# above the reduction loop, which stays nested under the spatial i0/i1 loops.
@T.prim_func
def matmul_decompose2(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    for i0, i1 in T.grid(128, 128):
        with T.block("update_init"):
            vi_init, vj_init = T.axis.remap("SS", [i0, i1])
            C[vi_init, vj_init] = T.float32(0)
        for i2 in T.serial(0, 128):
            with T.block("update_update"):
                vi, vj, vk = T.axis.remap("SSR", [i0, i1, i2])
                C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Negative case: loop nest is (i, k, j), so the reduction loop k encloses the
# spatial loop j — decomposing at the innermost loop must raise ScheduleError.
@T.prim_func
def matmul_decompose_fail3(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, k, j in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of splitting i into 16x8 and k into 19x7 (with a T.where
# guard, since 19*7 > 128) and then decomposing at the inner spatial loop.
@T.prim_func
def matmul_decompose4(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        for i0_0 in T.serial(0, 16):
            for i0_1_init, i1_init in T.grid(8, 128):
                with T.block("update_init"):
                    vi_init = T.axis.S(128, i0_0 * 8 + i0_1_init)
                    vj_init = T.axis.S(128, i1_init)
                    C[vi_init, vj_init] = T.float32(0)
            for i0_1, i1, i2_0, i2_1 in T.grid(8, 128, 19, 7):
                with T.block("update_update"):
                    T.where((((i2_0 * 7) + i2_1) < 128))
                    vi = T.axis.S(128, i0_0 * 8 + i0_1)
                    vj = T.axis.S(128, i1)
                    vk = T.axis.R(128, i2_0 * 7 + i2_1)
                    C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Same matmul workload but carrying a block annotation, used to verify the
# annotation is preserved by decompose_reduction.
@T.prim_func
def matmul_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            T.block_attr({"test_annotation": 1})
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected output: after decomposition both the init and update blocks retain
# the original block annotation.
@T.prim_func
def matmul_decompose_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            T.block_attr({"test_annotation": 1})
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            T.block_attr({"test_annotation": 1})
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Column sum with a vectorized inner loop; exercises decompose_reduction's
# handling of non-serial for kinds.
@T.prim_func
def colsum_with_vectorization(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 32], dtype="float32")
    B = T.match_buffer(b, [32], dtype="float32")
    for k in T.serial(0, 128):
        for i in T.vectorized(0, 32):
            with T.block("B"):
                vk, vi = T.axis.remap("RS", [k, i])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vk, vi]
# Expected output: the hoisted init loop keeps the vectorized for kind.
@T.prim_func
def colsum_decompose_with_vectorization(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 32], dtype="float32")
    B = T.match_buffer(b, [32], dtype="float32")
    for i in T.vectorized(0, 32):
        with T.block("B_init"):
            vi = T.axis.S(32, i)
            B[vi] = T.float32(0)
    for k in T.serial(0, 128):
        for i in T.vectorized(0, 32):
            with T.block("B"):
                vk, vi = T.axis.remap("RS", [k, i])
                B[vi] = B[vi] + A[vk, vi]
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_reduction_decompose0(use_block_name):
    """Decompose matmul's init at the outermost spatial loop."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = "update" if use_block_name else sch.get_block("update")
    outer = sch.get_loops(update)[0]
    sch.decompose_reduction(update, outer)
    tvm.ir.assert_structural_equal(matmul_decompose0, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose1(use_block_name):
    """Decompose the blockized row sum at its outer spatial loop."""
    sch = tir.Schedule(rowsum_blockized, debug_mask="all")
    block = "blockized_B" if use_block_name else sch.get_block("blockized_B")
    outer, _ = sch.get_loops(block)
    sch.decompose_reduction(block, outer)
    tvm.ir.assert_structural_equal(matmul_decompose1, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=rowsum_blockized)
def test_reduction_decompose2():
    """Decompose matmul right at the reduction loop itself."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = sch.get_block("update")
    k_loop = sch.get_loops(update)[-1]
    sch.decompose_reduction(update, k_loop)
    tvm.ir.assert_structural_equal(matmul_decompose2, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose3():
    """Decomposing at a loop nested inside a reduction loop must fail."""
    sch = tir.Schedule(matmul_decompose_fail3, debug_mask="all")
    update = sch.get_block("update")
    innermost = sch.get_loops(update)[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.decompose_reduction(update, innermost)
def test_reduction_decompose4():
    """Decompose after splitting a spatial loop and the reduction loop."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update = sch.get_block("update")
    loop_i, _, loop_k = sch.get_loops(update)
    _, i_inner = sch.split(loop_i, factors=[16, 8])
    sch.split(loop_k, factors=[19, 7])
    sch.decompose_reduction(update, i_inner)
    tvm.ir.assert_structural_equal(matmul_decompose4, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul)
def test_reduction_decompose_with_annotation():
    """Block annotations are carried over to both decomposed blocks."""
    sch = tir.Schedule(matmul_with_annotation, debug_mask="all")
    update = sch.get_block("update")
    outer = sch.get_loops(update)[0]
    sch.decompose_reduction(update, outer)
    tvm.ir.assert_structural_equal(matmul_decompose_with_annotation, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul_with_annotation)
def test_reduction_decompose_with_different_for_kind():
    """The vectorized for kind is preserved when the column sum is decomposed."""
    sch = tir.Schedule(colsum_with_vectorization, debug_mask="all")
    block = sch.get_block("B")
    reduction_loop = sch.get_loops(block)[0]
    init = sch.decompose_reduction(block, reduction_loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], colsum_decompose_with_vectorization)
    assert sch.get(block).same_as(sch.get(sch.get_block("B_update")))
    assert sch.get(init).same_as(sch.get(sch.get_block("B_init")))
    verify_trace_roundtrip(sch, mod=colsum_with_vectorization)
def test_decompose_reduction_ref_hash_check():
    """Scheduling must not mutate the module the schedule was created from."""
    original = tvm.IRModule.from_expr(matmul)
    snapshot = tvm.ir.structural_hash(original)
    sch = tir.Schedule(original["main"], debug_mask="all")
    update = sch.get_block("update")
    innermost = sch.get_loops(update)[-1]
    sch.decompose_reduction(update, innermost)
    assert snapshot == tvm.ir.structural_hash(original)
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.schedule |
import ScheduleError
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
# Elementwise doubling that reads A transposed; input for the basic
# reindex-read test.
@T.prim_func
def transpose_elementwise(
    A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]
) -> None:
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vj, vi] * 2.0
# Expected output of reindex-ing A in transpose_elementwise: the transposed
# access is staged through A_reindex so the compute block reads contiguously.
@T.prim_func
def transpose_elementwise_reindex_read(
    A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]
) -> None:
    A_reindex = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("A_reindex"):
            vi, vj = T.axis.remap("SS", [i, j])
            A_reindex[vi, vj] = A[vj, vi]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A_reindex[vi, vj] * 2.0
# NHWC conv2d (stride 2, 7x7 kernel, 3->64 channels) with explicit padding;
# input workload for the conv2d reindex tests.
@T.prim_func
def conv2d_nhwc(
    Input: T.Buffer[(1, 224, 224, 3), "float32"],
    Weight: T.Buffer[(7, 7, 3, 64), "float32"],
    Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                ((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
                Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
                T.float32(0),
                dtype="float32",
            )
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            with T.init():
                Conv2d_nhwc[n, h, w, co] = T.float32(0)
            Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
                PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
                * Weight[rh, rw, rc, co]
            )
# Expected output of reindex-ing the data (PadInput) read of conv2d_nhwc:
# the strided access is staged through ReindexInput.
@T.prim_func
def conv2d_nhwc_reindex_data(
    Input: T.Buffer[(1, 224, 224, 3), "float32"],
    Weight: T.Buffer[(7, 7, 3, 64), "float32"],
    Conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    ReindexInput = T.alloc_buffer([1, 112, 112, 7, 7, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                ((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
                Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
                T.float32(0),
                dtype="float32",
            )
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 112, 112, 7, 7, 3):
        with T.block("ReindexInput"):
            n, h, w, rh, rw, rc = T.axis.remap("SSSSSS", [i0, i1, i2, i3, i4, i5])
            ReindexInput[n, h, w, rh, rw, rc] = PadInput[n, ((h * 2) + rh), ((w * 2) + rw), rc]
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            with T.init():
                Conv2d_nhwc[n, h, w, co] = T.float32(0)
            Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
                ReindexInput[n, h, w, rh, rw, rc] * Weight[rh, rw, rc, co]
            )
# Expected output of reindex-ing the weight read of conv2d_nhwc: the weight is
# staged through weight_reindex with its layout permuted to [co, rh, rw, rc].
#
# Fix: the PadInput channel index in T.reads(...) and in the update expression
# had been truncated to a bare `co`, which is a syntax error and does not match
# conv2d_nhwc's access pattern. Restored `co // 64 * 3 + rc`, the same index
# (T.floordiv(co, 64) * 3 + rc) that the input workload conv2d_nhwc uses.
@T.prim_func
def conv2d_nhwc_reindex_weight(
    var_inputs: T.handle, var_weight: T.handle, var_conv2d_nhwc: T.handle
) -> None:
    inputs = T.match_buffer(var_inputs, [1, 224, 224, 3], dtype="float32")
    weight = T.match_buffer(var_weight, [7, 7, 3, 64], dtype="float32")
    conv2d_nhwc = T.match_buffer(var_conv2d_nhwc, [1, 112, 112, 64], dtype="float32")
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    weight_reindex = T.alloc_buffer([64, 7, 7, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    for ax3, ax4, ax5, ax6 in T.grid(64, 7, 7, 3):
        with T.block("weight_reindex"):
            v3, v4, v5, v6 = T.axis.remap("SSSS", [ax3, ax4, ax5, ax6])
            T.reads(weight[v4, v5, v6, v3])
            T.writes(weight_reindex[v3, v4, v5, v6])
            weight_reindex[v3, v4, v5, v6] = weight[v4, v5, v6, v3]
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            T.reads(
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight_reindex[co, rh, rw, rc],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc]
                * weight_reindex[co, rh, rw, rc]
            )
# 512x512 matmul with explicit read/write regions; input for the
# reindex-write tests.
@T.prim_func
def matmul(
    A: T.Buffer[(512, 512), "float32"],
    B: T.Buffer[(512, 512), "float32"],
    C: T.Buffer[(512, 512), "float32"],
) -> None:
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# Expected output of reindex-ing matmul's write buffer: the accumulation goes
# into C_reindex, followed by a copy-back block into C.
@T.prim_func
def matmul_reindex_write(
    A: T.Buffer[(512, 512), "float32"],
    B: T.Buffer[(512, 512), "float32"],
    C: T.Buffer[(512, 512), "float32"],
) -> None:
    C_reindex = T.alloc_buffer([512, 512], dtype="float32")
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C_reindex[i, j], A[i, k], B[k, j])
            T.writes(C_reindex[i, j])
            with T.init():
                C_reindex[i, j] = T.float32(0)
            C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
    for i0, i1 in T.grid(512, 512):
        with T.block("C_reindex"):
            v0, v1 = T.axis.remap("SS", [i0, i1])
            T.reads(C_reindex[v0, v1])
            T.writes(C[v0, v1])
            C[v0, v1] = C_reindex[v0, v1]
# Negative case for reindex: block "B" reads buffer A at two different index
# patterns, so the read cannot be reindexed.
@T.prim_func
def multiple_read(A: T.Buffer[(128, 128), "float32"], B: T.Buffer[(128, 128), "float32"]) -> None:
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vj, vi] + A[vi, vj]
# Matmul whose leading extent is int64 while the others are int32; exercises
# reindex with mixed index dtypes.
@T.prim_func
def mixed_dtype(
    p0: T.Buffer[(T.int64(2), 1280), "float16"],
    p1: T.Buffer[(1280, 1280), "float16"],
    T_matmul_NT: T.Buffer[(T.int64(2), 1280), "float16"],
) -> None:
    for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
        with T.block("T_matmul_NT"):
            i = T.axis.spatial(T.int64(2), i0)
            j, k = T.axis.remap("SR", [i1, i2])
            T.reads(p0[i, k], p1[j, k])
            T.writes(T_matmul_NT[i, j])
            with T.init():
                T_matmul_NT[i, j] = T.float16(0)
            T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1[j, k]
# Expected output of reindex-ing mixed_dtype's write buffer; the int64 axis
# extent is preserved in both the staging buffer and the copy-back loop.
@T.prim_func
def mixed_dtype_reindex_write(
    p0: T.Buffer[(T.int64(2), 1280), "float16"],
    p1: T.Buffer[(1280, 1280), "float16"],
    T_matmul_NT: T.Buffer[(T.int64(2), 1280), "float16"],
) -> None:
    T_matmul_NT_reindex = T.alloc_buffer([T.int64(2), 1280], dtype="float16")
    for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
        with T.block("T_matmul_NT"):
            i = T.axis.spatial(T.int64(2), i0)
            j, k = T.axis.remap("SR", [i1, i2])
            T.reads(p0[i, k], p1[j, k])
            T.writes(T_matmul_NT_reindex[i, j])
            with T.init():
                T_matmul_NT_reindex[i, j] = T.float16(0)
            T_matmul_NT_reindex[i, j] = T_matmul_NT_reindex[i, j] + p0[i, k] * p1[j, k]
    for ax0, ax1 in T.grid(T.int64(2), 1280):
        with T.block("T_matmul_NT_reindex"):
            v0 = T.axis.spatial(T.int64(2), ax0)
            v1 = T.axis.remap("S", [ax1])
            T.reads(T_matmul_NT_reindex[v0, v1])
            T.writes(T_matmul_NT[v0, v1])
            T_matmul_NT[v0, v1] = T_matmul_NT_reindex[v0, v1]
# Matmul degenerated to a dot product (1x512 @ 512x1); exercises reindex with
# unit-extent spatial dimensions.
@T.prim_func
def matmul_unit_dim(
    A: T.Buffer[(1, 512), "float32"],
    B: T.Buffer[(512, 1), "float32"],
    C: T.Buffer[(1, 1), "float32"],
) -> None:
    for i0, i1, i2 in T.grid(1, 1, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# Expected output of reindex-ing matmul_unit_dim's write buffer: the unit-dim
# staging buffer plus a (1, 1) copy-back loop.
@T.prim_func
def matmul_unit_dim_reindex_write(
    A: T.Buffer[(1, 512), "float32"],
    B: T.Buffer[(512, 1), "float32"],
    C: T.Buffer[(1, 1), "float32"],
) -> None:
    C_reindex = T.alloc_buffer([1, 1], dtype="float32")
    for i0, i1, i2 in T.grid(1, 1, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C_reindex[i, j], A[i, k], B[k, j])
            T.writes(C_reindex[i, j])
            with T.init():
                C_reindex[i, j] = T.float32(0)
            C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
    for i0, i1 in T.grid(1, 1):
        with T.block("C_reindex"):
            v0, v1 = T.axis.remap("SS", [i0, i1])
            T.reads(C_reindex[v0, v1])
            T.writes(C[v0, v1])
            C[v0, v1] = C_reindex[v0, v1]
# Parametrize: address blocks by object or name, buffers by index or name.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
use_buffer_name = tvm.testing.parameter(by_dict={"buffer_index": False, "buffer_name": True})
def test_reindex_read_basic(use_block_name, use_buffer_name):
    """reindex on a transposed read stages it through a new buffer."""
    sch = tir.Schedule(transpose_elementwise)
    target_block = "B" if use_block_name else sch.get_block("B")
    target_buffer = "A" if use_buffer_name else ("read", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(transpose_elementwise_reindex_read, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=transpose_elementwise)
def test_conv2d_reindex_weight(use_block_name, use_buffer_name):
    """reindex the weight read of conv2d_nhwc into a permuted staging buffer."""
    sch = tir.Schedule(conv2d_nhwc)
    target_block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
    target_buffer = "Weight" if use_buffer_name else ("read", 1)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_weight, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_conv2d_reindex_data(use_block_name, use_buffer_name):
    """reindex the padded-data read of conv2d_nhwc into a staging buffer."""
    sch = tir.Schedule(conv2d_nhwc)
    target_block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
    target_buffer = "PadInput" if use_buffer_name else ("read", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_data, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_matmul_reindex_write(use_block_name, use_buffer_name):
    """reindex matmul's output buffer, adding a copy-back block."""
    sch = tir.Schedule(matmul)
    target_block = "matmul" if use_block_name else sch.get_block("matmul")
    target_buffer = "C" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(matmul_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul)
def test_reindex_fail_multiple_read(use_block_name, use_buffer_name):
    """reindex must fail when the buffer is read with two different patterns."""
    sch = tir.Schedule(multiple_read)
    target_block = "B" if use_block_name else sch.get_block("B")
    target_buffer = "A" if use_buffer_name else ("read", 0)
    with pytest.raises(ScheduleError):
        sch.reindex(target_block, target_buffer)
def test_reindex_mixed_dtype(use_block_name, use_buffer_name):
    """reindex handles a write buffer with mixed int64/int32 index dtypes."""
    sch = tir.Schedule(mixed_dtype)
    target_block = "T_matmul_NT" if use_block_name else sch.get_block("T_matmul_NT")
    target_buffer = "T_matmul_NT" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(mixed_dtype_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=mixed_dtype)
def test_matmul_unit_dim_reindex_write(use_block_name, use_buffer_name):
    """reindex works on an output buffer whose dimensions are all unit-extent."""
    sch = tir.Schedule(matmul_unit_dim)
    target_block = "matmul" if use_block_name else sch.get_block("matmul")
    target_buffer = "C" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(matmul_unit_dim_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul_unit_dim)
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
import sys |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
# 4-D elementwise doubling; the base workload for the reorder tests.
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Negative case: vl = l * 16 only covers a stride-16 subset of axis extent 128,
# so the bindings are not affine-complete and reorder must fail.
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 8):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            vl = T.axis.S(128, l * 16)
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Negative case: loop k's extent depends on outer loop var i, so the loops
# cannot be reordered across it.
@T.prim_func
def elementwise_dependent_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i in T.serial(0, 128):
        for j, k, l in T.grid(128, i, 128):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Elementwise op guarded by a flattened-index predicate; used to check that
# reorder refuses loops carrying a T.where condition.
@T.prim_func
def elementwise_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# The j loop body contains two sibling k loops ("C" then "B"), so the nest is
# not a single-branch chain; reorder across it must fail.
@T.prim_func
def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    C = T.alloc_buffer((128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(0, 128):
            with T.block("C"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                C[vi, vj, vk] = A[vi, vj, vk] * 2.0
        for k in T.serial(0, 128):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[vi, vj, vk] = C[vi, vj, vk] * 2.0
# The k loop lives inside block "A"'s scope while i/j are outside it; loops
# from different block scopes cannot be reordered together.
@T.prim_func
def elementwise_with_loops_not_same_scope(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            for k in T.serial(0, 128):
                with T.block("B"):
                    vk = T.axis.S(128, k)
                    T.reads([A[vi, vj, vk]])
                    T.writes([B[vi, vj, vk]])
                    B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Block var vk uses the "scan" iter type, which is neither spatial nor
# reduction; reorder must reject the block.
@T.prim_func
def elementwise_with_wrong_block_var_type(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            vk = T.axis.scan(128, k)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of reorder(l, i) on elementwise: loop order l, j, k, i.
@T.prim_func
def elementwise_reordered(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for l, j, k, i in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Expected result of reorder(k, i, l) on elementwise: loop order k, j, i, l.
@T.prim_func
def elementwise_reordered2(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for k, j, i, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Reordered variant of elementwise_predicate (loop order l, j, k, i) with the
# predicate kept intact.
@T.prim_func
def elementwise_reordered_with_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for l, j, k, i in T.grid(128, 128, 128, 128):
        with T.block("B"):
            T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Two blocks with opaque (whole-buffer) write regions — one plain store, one
# tvm_fill_fragment intrinsic; input for the opaque-access reorder test.
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16], "float32")
    B = T.match_buffer(b, [16, 16], "float32")
    for i, j in T.grid(16, 16):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for i, j in T.grid(16, 16):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
# Expected result of swapping the i/j loops of both blocks in opaque_access.
@T.prim_func
def opaque_access_reorder(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16], "float32")
    B = T.match_buffer(b, [16, 16], "float32")
    for j, i in T.grid(16, 16):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for j, i in T.grid(16, 16):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
def test_reorder():
    """Swap the outermost and innermost loops of the 4-D elementwise op."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    sch.reorder(loops[3], loops[0])
    tvm.ir.assert_structural_equal(elementwise_reordered, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder2():
    """Reorder three of the four loops into the order k, i, l."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    sch.reorder(loops[2], loops[0], loops[3])
    tvm.ir.assert_structural_equal(elementwise_reordered2, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder_with_opaque_access():
    """Reorder works on blocks with opaque (whole-buffer) write regions."""
    sch = tir.Schedule(opaque_access, debug_mask="all")
    for block_name in ("A", "B"):
        loop_i, loop_j = sch.get_loops(sch.get_block(block_name))
        sch.reorder(loop_j, loop_i)
    tvm.ir.assert_structural_equal(opaque_access_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_reorder_overlapped_access():
    # Reorder is legal even when the block binding i = v0 * 2 + v1 makes
    # consecutive v0 iterations touch overlapping rows of A/B.
    @T.prim_func
    def overlapped_access(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
        for v0, v1, v2 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * 2 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    # Expected result: v1 and v2 swapped, bindings unchanged.
    @T.prim_func
    def overlapped_access_reorder(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
        for v0, v2, v1 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * 2 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    sch = tir.Schedule(overlapped_access, debug_mask="all")
    v0, v1, v2 = sch.get_loops(sch.get_block("block"))
    sch.reorder(v0, v2, v1)
    tvm.ir.assert_structural_equal(overlapped_access_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=overlapped_access)
def test_reorder_with_partial_affineness():
    # The binding i = v0 * v0 + v1 is non-affine in v0, so any reorder that
    # moves v0 must fail; reordering only the affine loops v1/v2 is allowed.
    @T.prim_func
    def non_affine_func(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
        for v0, v1, v2 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * v0 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    # Expected result of the legal reorder(v2, v1).
    @T.prim_func
    def non_affine_func_reorder(A: T.Buffer[(14, 4), "float32"], B: T.Buffer[(14, 4), "float32"]):
        for v0, v2, v1 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * v0 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    sch = tir.Schedule(non_affine_func, debug_mask="all")
    v0, v1, v2 = sch.get_loops(sch.get_block("block"))
    # Moving the non-affine loop v0 is rejected...
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(v0, v2, v1)
    # ...but the affine pair can still be swapped.
    sch.reorder(v2, v1)
    tvm.ir.assert_structural_equal(non_affine_func_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=non_affine_func)
def test_reorder_with_cascade_tiled_ops():
    # Two chained 3x3 sum pools; after tiling pool_1's h axis and computing
    # pool_0 at the tile loop, reorder the producer's w/h_i loops.
    @T.prim_func
    def cascade_pool_ops(
        x: T.Buffer[(1, 16, 112, 112), "float32"], y2: T.Buffer[(1, 16, 108, 108), "float32"]
    ) -> None:
        y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
        for n, c, h, w, kh, kw in T.grid(1, 16, 110, 110, 3, 3):
            with T.block("pool_0"):
                ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
                with T.init():
                    y1[ax0, ax1, ax2, ax3] = 0.0
                y1[ax0, ax1, ax2, ax3] = y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
        for n, c, h, w, kh, kw in T.grid(1, 16, 108, 108, 3, 3):
            with T.block("pool_1"):
                ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
                with T.init():
                    y2[ax0, ax1, ax2, ax3] = 0.0
                y2[ax0, ax1, ax2, ax3] = y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]

    # Expected result: pool_0 computed inside pool_1's h_o tile with its
    # w/h_i loops swapped.
    @T.prim_func
    def cascade_pool_ops_tile_reordered(
        x: T.Buffer[(1, 16, 112, 112), "float32"], y2: T.Buffer[(1, 16, 108, 108), "float32"]
    ) -> None:
        y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
        for n, c, h_o in T.grid(1, 16, 27):
            for w, h_i, kh, kw in T.grid(110, 6, 3, 3):
                with T.block("pool_0"):
                    ax0 = T.axis.spatial(1, 0)
                    ax1 = T.axis.spatial(16, c)
                    ax2 = T.axis.spatial(110, h_o * 4 + h_i)
                    ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
                    with T.init():
                        y1[ax0, ax1, ax2, ax3] = 0.0
                    y1[ax0, ax1, ax2, ax3] = (
                        y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
                    )
            for h_i, w, kh, kw in T.grid(4, 108, 3, 3):
                with T.block("pool_1"):
                    ax0 = T.axis.spatial(1, n)
                    ax1 = T.axis.spatial(16, c)
                    ax2 = T.axis.spatial(108, h_o * 4 + h_i)
                    ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
                    with T.init():
                        y2[ax0, ax1, ax2, ax3] = 0.0
                    y2[ax0, ax1, ax2, ax3] = (
                        y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]
                    )

    sch = tvm.tir.schedule.Schedule(cascade_pool_ops)
    pool_0 = sch.get_block("pool_0")
    pool_1 = sch.get_block("pool_1")
    _, _, h, w, _, _ = sch.get_loops(pool_1)
    ho, _ = sch.split(h, factors=[None, 4])
    sch.compute_at(pool_0, ho)
    _, _, _, h_i, w, _, _ = sch.get_loops(pool_0)
    sch.reorder(w, h_i)
    tvm.ir.assert_structural_equal(cascade_pool_ops_tile_reordered, sch.mod["main"], True)
    verify_trace_roundtrip(sch=sch, mod=cascade_pool_ops)
def test_reorder_with_predicate():
    """Reordering loops guarded by a T.where predicate is rejected."""
    sch = tir.Schedule(elementwise_predicate, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
def test_reorder_fail_with_multi_appearance_loops():
    """A loop may appear at most once in the requested order."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[2], loops[0], loops[0])
def test_reorder_fail_with_non_single_branch_loop():
    """Reorder refuses loop nests whose body branches into sibling loops."""
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    loop_i, _, loop_k = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loop_k, loop_i)
    # A fresh schedule: mixing loops of the two sibling blocks also fails.
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    loop_i, _, k_of_b = sch.get_loops(sch.get_block("B"))
    k_of_c = sch.get_loops(sch.get_block("C"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(k_of_b, loop_i, k_of_c)
def test_reorder_fail_with_loops_not_under_same_scope():
    """Loops from different block scopes cannot be reordered together."""
    sch = tir.Schedule(elementwise_with_loops_not_same_scope, debug_mask="all")
    inner_k = sch.get_loops(sch.get_block("B"))[0]
    outer_i = sch.get_loops(sch.get_block("A"))[0]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(inner_k, outer_i)
def test_reorder_fail_with_wrong_block_var_type():
    """A block var that is neither spatial nor reduction blocks reordering."""
    sch = tir.Schedule(elementwise_with_wrong_block_var_type, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[2], loops[0])
def test_reorder_fail_with_dependent_loops():
    """A loop extent depending on an outer loop var blocks reordering."""
    sch = tir.Schedule(elementwise_dependent_loop, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
def test_reorder_fail_not_affine_bindings():
    """Non-affine block bindings make reordering illegal."""
    sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
import pytest |
import tvm |
import tvm.testing
from tvm |
import te, tir, topi
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
# Matmul with the reduction axis pre-split into 4 x 8 x 4; input for the
# rfactor tests.
@T.prim_func
def transformed_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads([A[vi, vk], B[vj, vk]])
            T.writes([C[vi, vj]])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Same as transformed_matmul, but the update is written through a let-bound
# intermediate; exercises rfactor's handling of let statements.
@T.prim_func
def transformed_matmul_with_let(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads([A[vi, vk], B[vj, vk]])
            T.writes([C[vi, vj]])
            with T.init():
                C[vi, vj] = 0.0
            v_C: T.float32 = C[vi, vj] + (A[vi, vk] * B[vj, vk])
            C[vi, vj] = v_C
# Expected result of rfactor-ing `transformed_matmul` over i2_inner_inner:
# a rank-3 intermediate C_rf plus a final cross-factor reduction block.
@T.prim_func
def matmul_rfactor(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    C_rf = T.alloc_buffer([4, 128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update_rf"):
            vi2_inner_inner = T.axis.S(4, i2_inner_inner)
            vi = T.axis.S(128, i0)
            vj = T.axis.S(128, i1)
            # Fix: this binding was split mid-identifier ("vi" / "2_outer = ...")
            # in the damaged source; rejoined into one statement.
            vi2_outer = T.axis.R(4, i2_outer)
            vi2_inner_outer = T.axis.R(8, i2_inner_outer)
            with T.init():
                C_rf[vi2_inner_inner, vi, vj] = 0.0
            C_rf[vi2_inner_inner, vi, vj] = C_rf[vi2_inner_inner, vi, vj] + (
                A[vi, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
                * B[vj, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
            )
    for i0_1, i1_1, i2_inner_inner_1 in T.grid(128, 128, 4):
        with T.block("update"):
            vi2_inner_inner_1, vi_1, vj_1 = T.axis.remap("RSS", [i2_inner_inner_1, i0_1, i1_1])
            with T.init():
                C[vi_1, vj_1] = 0.0
            C[vi_1, vj_1] = C[vi_1, vj_1] + C_rf[vi2_inner_inner_1, vi_1, vj_1]
# Negative fixture: the reduction output C is consumed by a later block "D",
# so the function is not a single-stage pipeline for block "C".
@T.prim_func
def matmul_not_stage_pipeline(a: T.handle, b: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [256, 256])
    B = T.match_buffer(b, [256, 256])
    D = T.match_buffer(d, [256, 256])
    C = T.alloc_buffer([256, 256])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    for i, j in T.grid(256, 256):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = C[vi, vj]
# Negative fixture: init writes C[vi, vj] but the update writes C[vj, vi],
# so init and update do not touch the same buffer element.
@T.prim_func
def matmul_not_same_buffer_access(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            # Transposed indices relative to the init statement above.
            C[vj, vi] = C[vj, vi] + A[vi, vk] * B[vk, vj]
# Fixture where one loop nest hosts two reduction blocks ("C" and "D") as
# siblings — the k loop has multiple children.
@T.prim_func
def matmul_loop_multiple_children(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    # Fix: this declaration was split across two physical lines
    # ("C = T.m" / "atch_buffer(...)") in the damaged source; rejoined.
    C = T.match_buffer(c, [128, 128])
    D = T.match_buffer(d, [128, 128])
    for k, i, j in T.grid(128, 128, 128):
        with T.block("C"):
            ck, ci, cj = T.axis.remap("RSS", [k, i, j])
            with T.init():
                C[ci, cj] = 0.0
            C[ci, cj] = C[ci, cj] + A[ci, ck] * B[ck, cj]
        with T.block("D"):
            dk, di, dj = T.axis.remap("RSS", [k, i, j])
            with T.init():
                D[di, dj] = 0.0
            D[di, dj] = D[di, dj] + B[di, dk] * A[dk, dj]
# Baseline fixture: per-batch sum of squares over a (256, 256) slice.
@T.prim_func
def square_sum(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    for b0, i0, j0 in T.grid(16, 256, 256):
        with T.block("C"):
            b, i, j = T.axis.remap("SRR", [b0, i0, j0])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + A[b, i, j] * A[b, i, j]
# Expected result of rfactor-ing `square_sum` over the j loop: C_rf keeps a
# partial sum per j, then block "C" reduces across the factor axis.
@T.prim_func
def square_sum_rfactor(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    C_rf = T.alloc_buffer([16, 256])
    for i0, i1, i2 in T.grid(16, 256, 256):
        with T.block("C_rf"):
            vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
            with T.init():
                C_rf[b, vi2] = 0.0
            C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
    for i0_1, i2_1 in T.grid(16, 256):
        with T.block("C"):
            vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
# Fixture with the two reduction loops fused (extent 65536) and a trailing
# unit loop; followed by an elementwise sqrt consumer block "D".
@T.prim_func
def transformed_square_sum_square_root(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            # Fix: this read annotation was split across two physical lines
            # ("T.reads([A[b, i, j]" / "])") in the damaged source; rejoined.
            T.reads([A[b, i, j]])
            T.writes([C[b]])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            T.reads([C[b_1]])
            T.writes([D[b_1]])
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected rfactor result of `transformed_square_sum_square_root` over the
# unit-extent inner fused loop: the factor axis has extent 1.
@T.prim_func
def square_sum_square_root_rfactor(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    C_rf = T.alloc_buffer([1, 16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C_rf"):
            vi1_i2_fused_inner, b = T.axis.remap("SS", [i1_i2_fused_inner, i0])
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            with T.init():
                C_rf[vi1_i2_fused_inner, b] = 0.0
            C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + (A[b, i, j] * A[b, i, j])
    for i0_1, i1_i2_fused_inner_1 in T.grid(16, 1):
        with T.block("C"):
            vi1_i2_fused_inner_1, b_1 = T.axis.remap("RS", [i1_i2_fused_inner_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[vi1_i2_fused_inner_1, b_1]
    for i0_2 in T.serial(0, 16):
        with T.block("D"):
            b_2 = T.axis.S(16, i0_2)
            D[b_2] = T.sqrt(C[b_2], dtype="float32")
# Fixture for rfactor with a factor-one loop: fused reduction loops of shape
# (65536, 1), where the unit loop is the rfactor candidate.
@T.prim_func
def transformed_square_sum_square_root_factor_one_1(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            with T.init():
                C[b] = 0.0
            # Fix: this update was split across two physical lines
            # ("C[b] = C[" / "b] + ...") in the damaged source; rejoined.
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected rfactor result for the factor-one case 1: C_rf gains a leading
# axis of extent 1 bound to the unit fused-inner loop.
@T.prim_func
def square_sum_square_root_factor_one_1_rfactor(
    A: T.Buffer[(16, 256, 256), "float32"], D: T.Buffer[(16,), "float32"]
) -> None:
    C = T.alloc_buffer([16], dtype="float32")
    C_rf = T.alloc_buffer([1, 16], dtype="float32")
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C_rf"):
            b = T.axis.spatial(16, i0)
            # Fix: the "// 256" tail of this binding was stripped in the
            # damaged source; restored to match the companion "% 256" line.
            i = T.axis.reduce(256, i1_i2_fused_outer // 256)
            j = T.axis.reduce(256, i1_i2_fused_outer % 256)
            vi1_i2_fused_inner = T.axis.spatial(1, i1_i2_fused_inner)
            with T.init():
                C_rf[vi1_i2_fused_inner, b] = T.float32(0)
            C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + A[b, i, j] * A[b, i, j]
    for i0, i1_i2_fused_inner in T.grid(16, 1):
        with T.block("C"):
            b, vi1_i2_fused_inner = T.axis.remap("SR", [i0, i1_i2_fused_inner])
            with T.init():
                C[b] = T.float32(0)
            C[b] = C[b] + C_rf[vi1_i2_fused_inner, b]
    for i0_1 in T.serial(16):
        with T.block("D"):
            b_1 = T.axis.spatial(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Fixture for rfactor with a factor-one loop: fused reduction loops of shape
# (1, 65536), where the unit *outer* loop is the rfactor candidate.
@T.prim_func
def transformed_square_sum_square_root_factor_one_2(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_inner, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_inner, 256))
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected rfactor result for the factor-one case 2: C_rf gains a trailing
# axis of extent 1 bound to the unit fused-outer loop.
@T.prim_func
def square_sum_square_root_factor_one_2_rfactor(
    A: T.Buffer[(16, 256, 256), "float32"], D: T.Buffer[(16,), "float32"]
) -> None:
    C = T.alloc_buffer([16], dtype="float32")
    C_rf = T.alloc_buffer([16, 1], dtype="float32")
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
        with T.block("C_rf"):
            b = T.axis.spatial(16, i0)
            # Fix: the "// 256" tail of this binding was stripped in the
            # damaged source; restored to match the companion "% 256" line.
            i = T.axis.reduce(256, i1_i2_fused_inner // 256)
            j = T.axis.reduce(256, i1_i2_fused_inner % 256)
            vi1_i2_fused_outer = T.axis.spatial(1, i1_i2_fused_outer)
            with T.init():
                C_rf[b, vi1_i2_fused_outer] = T.float32(0)
            C_rf[b, vi1_i2_fused_outer] = C_rf[b, vi1_i2_fused_outer] + A[b, i, j] * A[b, i, j]
    for i0, i1_i2_fused_outer in T.grid(16, 1):
        with T.block("C"):
            b, vi1_i2_fused_outer = T.axis.remap("SR", [i0, i1_i2_fused_outer])
            with T.init():
                C[b] = T.float32(0)
            C[b] = C[b] + C_rf[b, vi1_i2_fused_outer]
    for i0_1 in T.serial(16):
        with T.block("D"):
            b_1 = T.axis.spatial(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# `square_sum` with a block annotation attached; used to check that rfactor
# preserves block annotations.
@T.prim_func
def square_sum_with_annotation(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    for b0, i0, j0 in T.grid(16, 256, 256):
        with T.block("C"):
            T.block_attr({"test_annotation": 1})
            b, i, j = T.axis.remap("SRR", [b0, i0, j0])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + A[b, i, j] * A[b, i, j]
# Expected result: the "test_annotation" attribute is carried onto both the
# rfactor block and the final reduction block.
@T.prim_func
def square_sum_with_annotation_rfactor(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    C_rf = T.alloc_buffer([16, 256])
    for i0, i1, i2 in T.grid(16, 256, 256):
        with T.block("C_rf"):
            T.block_attr({"test_annotation": 1})
            vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
            with T.init():
                C_rf[b, vi2] = 0.0
            C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
    for i0_1, i2_1 in T.grid(16, 256):
        with T.block("C"):
            T.block_attr({"test_annotation": 1})
            vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
# Negative fixture: a purely elementwise block (no reduction), so rfactor
# does not apply.
@T.prim_func
def element_wise(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
# Baseline fixture: simple row-wise sum reduction.
@T.prim_func
def rowsum(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Negative fixture: the reduction var is bound to floordiv(k * k, 2), which
# is not a quasi-affine expression of the loop vars.
@T.prim_func
def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 16):
        with T.block("B"):
            vi = T.axis.S(128, i)
            # Quadratic in k — deliberately not quasi-affine.
            vk = T.axis.R(128, T.floordiv(k * k, 2))
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Negative fixture: the output is indexed by the reduction var as well
# (B[vi, vk]), so the reduction block is not dominant.
@T.prim_func
def rowsum_not_dominant(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi, vk] = 0.0
            B[vi, vk] = B[vi, vk] + A[vi, vk]
# Negative fixture: the reduction loop k is a parallel loop, not serial.
@T.prim_func
def rowsum_not_serial(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i in T.serial(0, 128):
        for k in T.parallel(0, 128):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                with T.init():
                    B[vi] = 0.0
                B[vi] = B[vi] + A[vi, vk]
# Negative fixture: init value is 1.0, not the identity of addition, so the
# body does not match a recognized reducer pattern.
@T.prim_func
def rowsum_wrong_reduce_pattern1(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 1.0
            B[vi] = B[vi] + A[vi, vk]
# Negative fixture: the update uses subtraction, which is not a recognized
# commutative reducer.
@T.prim_func
def rowsum_wrong_reduce_pattern2(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] - A[vi, vk]
# Negative fixture: the init statement is a LetStmt rather than a direct
# BufferStore.
@T.prim_func
def rowsum_init_not_bufferstore(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                # Init staged through a let-binding on purpose.
                v_init: T.float32 = T.float32(0)
                B[vi] = v_init
            B[vi] = B[vi] + A[vi, vk]
# `rowsum` after a split/fuse transformation: spatial and reduction vars are
# reconstructed from the three transformed loops.
@T.prim_func
def rowsum_transformed(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for io, ii_ko_fused, ki in T.grid(32, 128, 4):
        with T.block("B"):
            vi = T.axis.S(128, io * 4 + T.floordiv(ii_ko_fused, 32))
            vk = T.axis.R(128, T.floormod(ii_ko_fused, 32) * 4 + ki)
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Fixture reducing a vector into a zero-dimensional (scalar) buffer B[()].
@T.prim_func
def rowsum_zero_dim(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128])
    B = T.match_buffer(b, [])
    for k0 in range(128):
        with T.block("B"):
            k = T.axis.R(128, k0)
            with T.init():
                B[()] = 0.0
            B[()] = B[()] + A[k]
# Expected rfactor result for `rowsum_zero_dim`: the factor stage B_rf is a
# pure copy (one element per former reduction step), then block "B" reduces.
@T.prim_func
# Fix: the def line was split mid-keyword ("de" / "f rowsum_zero_dim_rfactor")
# in the damaged source; rejoined.
def rowsum_zero_dim_rfactor(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128])
    B = T.match_buffer(b, [])
    B_rf = T.alloc_buffer([128])
    for i in range(128):
        with T.block("B_rf"):
            vi0 = T.axis.S(128, i)
            B_rf[vi0] = A[vi0]
    for i in range(128):
        with T.block("B"):
            vi0_1 = T.axis.R(128, i)
            with T.init():
                B[()] = 0.0
            B[()] = B[()] + B_rf[vi0_1]
# Fixture with a split reduction loop (13 * 10 > 128) guarded by a T.where
# predicate over the partial tile.
@T.prim_func
def rowsum_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, k_0, k_1 in T.grid(128, 13, 10):
        with T.block("B"):
            T.where(k_0 * 10 + k_1 < 128)
            vi = T.axis.S(128, i)
            vk = T.axis.R(128, k_0 * 10 + k_1)
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Expected rfactor result for `rowsum_predicate`: the predicate stays on the
# factor stage; the final reduction over k_0 needs no predicate.
@T.prim_func
def rowsum_predicate_rfactor(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    B_rf = T.alloc_buffer([128, 13], dtype="float32")
    for i, k_0, k_1 in T.grid(128, 13, 10):
        with T.block("B_rf"):
            vk_0, vi, vk_1 = T.axis.remap("SSR", [k_0, i, k_1])
            T.where(k_0 * 10 + k_1 < 128)
            with T.init():
                B_rf[vi, vk_0] = T.float32(0)
            B_rf[vi, vk_0] = B_rf[vi, vk_0] + A[vi, vk_0 * 10 + vk_1]
    for i, k_0 in T.grid(128, 13):
        with T.block("B"):
            vk_0, vi = T.axis.remap("RS", [k_0, i])
            with T.init():
                B[vi] = T.float32(0)
            B[vi] = B[vi] + B_rf[vi, vk_0]
# Fixture with four chained reduction blocks (C -> D -> E -> F) sharing
# outer loops; used to test rfactor on one block among several siblings.
@T.prim_func
def multiple_reduction_blocks(a: T.handle, f: T.handle) -> None:
    A = T.match_buffer(a, (16, 16, 16))
    C = T.alloc_buffer((16, 16))
    D = T.alloc_buffer((16, 16))
    E = T.alloc_buffer((16, 16))
    F = T.match_buffer(f, (16, 16))
    for i in T.serial(0, 16):
        for j1 in T.serial(0, 16):
            # Fix: this loop header was split across two physical lines
            # ("for k1" / "o, k1i in T.grid(4, 4):") in the damaged source.
            for k1o, k1i in T.grid(4, 4):
                with T.block("C"):
                    ci, cj = T.axis.remap("SS", [i, j1])
                    ck = T.axis.R(16, k1o * 4 + k1i)
                    with T.init():
                        C[ci, cj] = 0.0
                    C[ci, cj] = C[ci, cj] + A[ci, cj, ck]
            for k2o, k2i in T.grid(4, 4):
                with T.block("D"):
                    di, dj = T.axis.remap("SS", [i, j1])
                    dk = T.axis.R(16, k2o * 4 + k2i)
                    with T.init():
                        D[di, dj] = 0.0
                    D[di, dj] = D[di, dj] + A[di, dj, dk] + C[di, dj]
        for j2 in T.serial(0, 16):
            for k3o, k3i in T.grid(4, 4):
                with T.block("E"):
                    ei, ej = T.axis.remap("SS", [i, j2])
                    ek = T.axis.R(16, k3o * 4 + k3i)
                    with T.init():
                        E[ei, ej] = 0.0
                    E[ei, ej] = E[ei, ej] + A[ei, ej, ek] + D[ei, ej]
            for k4o, k4i in T.grid(4, 4):
                with T.block("F"):
                    fi, fj = T.axis.remap("SS", [i, j2])
                    fk = T.axis.R(16, k4o * 4 + k4i)
                    with T.init():
                        F[fi, fj] = 0.0
                    F[fi, fj] = F[fi, fj] + A[fi, fj, fk] + E[fi, fj]
# Expected result of rfactor-ing block "C" of `multiple_reduction_blocks`
# over k1o; the sibling blocks D/E/F are unchanged apart from loop renaming.
@T.prim_func
def multiple_reduction_blocks_rfactor(a: T.handle, f: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16])
    C = T.alloc_buffer([16, 16])
    D = T.alloc_buffer([16, 16])
    E = T.alloc_buffer([16, 16])
    F = T.match_buffer(f, [16, 16])
    C_rf = T.alloc_buffer([16, 16, 4])
    for i, j1, k1o, k1i in T.grid(16, 16, 4, 4):
        with T.block("C_rf"):
            vk1o, ci, cj, vk1i = T.axis.remap("SSSR", [k1o, i, j1, k1i])
            with T.init():
                C_rf[ci, cj, vk1o] = 0.0
            C_rf[ci, cj, vk1o] = C_rf[ci, cj, vk1o] + A[ci, cj, ((vk1o * 4) + vk1i)]
    for i_1 in T.serial(0, 16):
        for j1_1 in T.serial(0, 16):
            # Fix: this loop header was split across two physical lines
            # ("... T.serial(0, 4" / "):") in the damaged source; rejoined.
            for k1o_1 in T.serial(0, 4):
                with T.block("C"):
                    vk1o_1, ci_1, cj_1 = T.axis.remap("RSS", [k1o_1, i_1, j1_1])
                    with T.init():
                        C[ci_1, cj_1] = 0.0
                    C[ci_1, cj_1] = C[ci_1, cj_1] + C_rf[ci_1, cj_1, vk1o_1]
            for k2o, k2i in T.grid(4, 4):
                with T.block("D"):
                    di, dj = T.axis.remap("SS", [i_1, j1_1])
                    dk = T.axis.R(16, k2o * 4 + k2i)
                    with T.init():
                        D[di, dj] = 0.0
                    D[di, dj] = (D[di, dj] + A[di, dj, dk]) + C[di, dj]
        for j2 in T.serial(0, 16):
            for k3o, k3i in T.grid(4, 4):
                with T.block("E"):
                    ei, ej = T.axis.remap("SS", [i_1, j2])
                    ek = T.axis.R(16, k3o * 4 + k3i)
                    with T.init():
                        E[ei, ej] = 0.0
                    E[ei, ej] = (E[ei, ej] + A[ei, ej, ek]) + D[ei, ej]
            for k4o, k4i in T.grid(4, 4):
                with T.block("F"):
                    fi, fj = T.axis.remap("SS", [i_1, j2])
                    fk = T.axis.R(16, k4o * 4 + k4i)
                    with T.init():
                        F[fi, fj] = 0.0
                    F[fi, fj] = (F[fi, fj] + A[fi, fj, fk]) + E[fi, fj]
# Global-pooling-style fixture: a fused 49-extent reduction loop whose block
# decomposes it into two 7-extent reduction vars; the rfactor loop has only
# spatial influence on the output.
@T.prim_func
def rfactor_spatial_only(
    A: T.Buffer[(1, 512, 7, 7), "float32"],
    B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc"):
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            # Fix: the "// 7" tail of this binding was stripped in the damaged
            # source; restored to match the companion "% 7" line.
            rv0 = T.axis.reduce(7, i4 // 7)
            rv1 = T.axis.reduce(7, i4 % 7)
            T.reads(A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1])
            T.writes(B[ax0, ax1, ax2, ax3])
            with T.init():
                B[ax0, ax1, ax2, ax3] = T.float32(0)
            B[ax0, ax1, ax2, ax3] = (
                B[ax0, ax1, ax2, ax3] + A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1]
            )
# Expected rfactor result for `rfactor_spatial_only`: B_rf keeps one element
# per fused step i4 (a pure copy, no init), then "acc" reduces over vi4.
@T.prim_func
def rfactor_spatial_only_after(
    A: T.Buffer[(1, 512, 7, 7), "float32"],
    B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
    B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc_rf"):
            vi4 = T.axis.spatial(49, i4)
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            # Fix: the tail of this store ("// 7, ax3 * 7 + vi4 % 7]") was
            # stripped in the damaged source; restored from the pre-rfactor
            # access pattern rv0 = vi4 // 7, rv1 = vi4 % 7.
            B_rf[ax0, ax1, ax2, ax3, vi4] = A[ax0, ax1, ax2 * 7 + vi4 // 7, ax3 * 7 + vi4 % 7]
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc"):
            vi4 = T.axis.reduce(49, i4)
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            with T.init():
                B[ax0, ax1, ax2, ax3] = T.float32(0)
            B[ax0, ax1, ax2, ax3] = B[ax0, ax1, ax2, ax3] + B_rf[ax0, ax1, ax2, ax3, vi4]
# Multi-value reduction fixture: argmax over a split reduction axis, written
# as two T.Select-based let-bindings (index and value updated together).
@T.prim_func
def argmax_split(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Argmin variant with init statements and final stores written in the
# opposite order (v1 before v0) — order must not affect reducer matching.
@T.prim_func
def argmin_split_init_update_reordered(
    # Fix: this parameter line was split across two physical lines
    # ("idx:" / "T.Buffer[...]") in the damaged source; rejoined.
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmin_v0: T.Buffer[(128,), "int32"],
    argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmin"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmin_v0[i], argmin_v1[i])
            with T.init():
                argmin_v1[i] = T.max_value("float32")
                argmin_v0[i] = -1
            v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
            v_argmin_v1: T.float32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k])
            argmin_v1[i] = v_argmin_v1
            argmin_v0[i] = v_argmin_v0
# Negative fixture: the two argmax outputs have different shapes (256 vs
# 128), which is invalid for a multi-value reducer.
@T.prim_func
def argmax_split_different_shape(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(256,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Negative fixture: the second output is stored at index i + 1 while the
# first uses i, so the outputs are not accessed at matching indices.
@T.prim_func
def argmax_split_different_indices(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i + 1] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i + 1] = v_argmax_v1
# Negative fixture: one init statement goes through a LetStmt instead of a
# direct BufferStore.
@T.prim_func
def argmax_split_init_not_bufferstore(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                # Init staged through a let-binding on purpose.
                v1_init: T.float32 = T.min_value("float32")
                argmax_v1[i] = v1_init
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Negative fixture: the init block stores to argmax_v0 twice (and never
# initializes argmax_v1) — duplicate init buffers are invalid.
@T.prim_func
def argmax_split_init_buffer_duplicate(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                # Intentional duplicate store to the same buffer (the fixture's
                # point). Fix: this line was split ("argmax_v0[i]" / "= -1")
                # in the damaged source; rejoined.
                argmax_v0[i] = -1
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Negative fixture: two init stores but only one let-binding in the update
# body (the second value is stored directly).
@T.prim_func
def argmax_split_letstmt_fewer_than_init(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            argmax_v0[i] = v_argmax_v0
            # Direct store without a matching let-binding — the mismatch
            # this fixture exists to exercise.
            argmax_v1[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
# Negative fixture: two let-bindings and two stores in the update body, but
# only one init store (argmax_v1 is never initialized).
@T.prim_func
def argmax_split_letstmt_more_than_init(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_let_body_neither_seqstmt_nor_bufferstore(
idx: T.Buffer[(128, 128), |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.