text
stringlengths 1
2.05k
|
---|
"int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0, i1_1 in T.grid(128, 4, 32):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
T.evaluate(0)
@T.prim_func
def argmax_split_init_update_inconsistent_bufferstore_number(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Negative rfactor fixture: the update body has three BufferStores
    # (argmax_v1 is stored twice) but the init block has only two, so the
    # init/update store counts are inconsistent and rfactor must reject it.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
            argmax_v1[i] = v_argmax_v1


@T.prim_func
def argmax_split_body_seq_not_bufferstore(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Negative rfactor fixture: the last statement of the update sequence is
    # T.evaluate(0) rather than a BufferStore, so rfactor must reject it.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            T.evaluate(0)
@T.prim_func
def argmax_split_body_bufferstore_value_not_var(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Negative rfactor fixture: the first update store writes a full Select
    # expression instead of a let-bound variable, so rfactor must reject it.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            argmax_v1[i] = v_argmax_v1


@T.prim_func
def argmax_split_body_bufferstore_value_unbound_var(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Negative rfactor fixture: the stored value v_unbound is declared outside
    # the block and never bound by a LetStmt, so rfactor must reject it.
    v_unbound = T.var("int32")
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_unbound
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_one_let_var_used_multi_times(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "int32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "int32"],
) -> None:
    # Negative rfactor fixture: v_argmax_v0 is stored into both output
    # buffers, so one let variable is used multiple times.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("int32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v0


@T.prim_func
def argmax_split_body_one_buffer_updated_multi_times(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "int32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "int32"],
) -> None:
    # Negative rfactor fixture: argmax_v0 is stored twice in the update body
    # while argmax_v1 is never updated.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("int32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v0[i] = v_argmax_v1
@T.prim_func
def argmax_split_init_buffer_not_match(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v0_1: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Negative rfactor fixture: the init block writes argmax_v0_1 while the
    # update body writes argmax_v0, so init/update buffers do not match.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v0_1[i], argmax_v1[i])
            with T.init():
                argmax_v0_1[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1


@T.prim_func
def argmax_split_rfactor(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmax_v0: T.Buffer[(128,), "int32"],
    argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Expected output of rfactor(ki, 1) on argmax_split: a partial-argmax
    # block over the inner loop plus a final cross-factor reduction.
    argmax_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
    argmax_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax_rf"):
            vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
            T.writes(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
            with T.init():
                argmax_v0_rf[i, vi1_1] = -1
                argmax_v1_rf[i, vi1_1] = T.min_value("float32")
            v_argmax_v0_rf: T.int32 = T.Select(
                argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
                argmax_v0_rf[i, vi1_1],
                idx[i, vi1_0 * 32 + vi1_1],
            )
            v_argmax_v1_rf: T.float32 = T.Select(
                argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
                argmax_v1_rf[i, vi1_1],
                val[i, vi1_0 * 32 + vi1_1],
            )
            argmax_v0_rf[i, vi1_1] = v_argmax_v0_rf
            argmax_v1_rf[i, vi1_1] = v_argmax_v1_rf
    for i0, i1_1 in T.grid(128, 32):
        with T.block("argmax"):
            vi1_1, i = T.axis.remap("RS", [i1_1, i0])
            T.reads(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(
                argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v0[i], argmax_v0_rf[i, vi1_1]
            )
            v_argmax_v1: T.float32 = T.Select(
                argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v1[i], argmax_v1_rf[i, vi1_1]
            )
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmin_split_rfactor(
    idx: T.Buffer[(128, 128), "int32"],
    val: T.Buffer[(128, 128), "float32"],
    argmin_v0: T.Buffer[(128,), "int32"],
    argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
    # Expected output of rfactor(ki, 1) on the argmin workload; mirror of
    # argmax_split_rfactor with <= / max_value instead of >= / min_value.
    argmin_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
    argmin_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmin_rf"):
            vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
            T.writes(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
            with T.init():
                argmin_v0_rf[i, vi1_1] = -1
                argmin_v1_rf[i, vi1_1] = T.max_value("float32")
            v_argmin_v0_rf: T.int32 = T.Select(
                argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
                argmin_v0_rf[i, vi1_1],
                idx[i, vi1_0 * 32 + vi1_1],
            )
            v_argmin_v1_rf: T.float32 = T.Select(
                argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
                argmin_v1_rf[i, vi1_1],
                val[i, vi1_0 * 32 + vi1_1],
            )
            argmin_v0_rf[i, vi1_1] = v_argmin_v0_rf
            argmin_v1_rf[i, vi1_1] = v_argmin_v1_rf
    for i0, i1_1 in T.grid(128, 32):
        with T.block("argmin"):
            vi1_1, i = T.axis.remap("RS", [i1_1, i0])
            T.reads(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
            T.writes(argmin_v0[i], argmin_v1[i])
            with T.init():
                argmin_v0[i] = -1
                argmin_v1[i] = T.max_value("float32")
            v_argmin_v0: T.int32 = T.Select(
                argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v0[i], argmin_v0_rf[i, vi1_1]
            )
            v_argmin_v1: T.float32 = T.Select(
                argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v1[i], argmin_v1_rf[i, vi1_1]
            )
            argmin_v0[i] = v_argmin_v0
            argmin_v1[i] = v_argmin_v1
@T.prim_func
def argmax_topi_rfactor(
    placeholder: T.Buffer[(1, 32), "int32"], placeholder_red: T.Buffer[1, "int32"]
) -> None:
    # Expected result of splitting and rfactoring the TOPI argmax reduction.
    # TOPI argmax breaks value ties by preferring the smaller index, hence the
    # `or ... == ... and <` predicate inside the Selects.
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
    placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
    for i0, i1_0, i1_1 in T.grid(1, 4, 8):
        with T.block("placeholder_red_temp_rf"):
            vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
            T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            with T.init():
                placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
                placeholder_red_temp_v1_rf[ax0, vi1_1] = -2147483648
            v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1]
                or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
                and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
                placeholder_red_temp_v0_rf[ax0, vi1_1],
                vi1_0 * 8 + vi1_1,
            )
            v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder[ax0, vi1_0 * 8 + vi1_1],
            )
            placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
            placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
    for i0, i1_1 in T.grid(1, 8):
        with T.block("placeholder_red_temp"):
            vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
            T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
            with T.init():
                placeholder_red_temp_v0[ax0] = -1
                placeholder_red_temp_v1[ax0] = -2147483648
            v_placeholder_red_temp_v0: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1]
                or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
                and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
                placeholder_red_temp_v0[ax0],
                placeholder_red_temp_v0_rf[ax0, vi1_1],
            )
            v_placeholder_red_temp_v1: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder_red_temp_v1[ax0],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
            )
            placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
            placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
    for i0 in T.serial(1):
        with T.block("placeholder_red"):
            ax0 = T.axis.spatial(1, i0)
            T.reads(placeholder_red_temp_v0[ax0])
            T.writes(placeholder_red[ax0])
            placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
@T.prim_func
def argmin_topi_rfactor(
    placeholder: T.Buffer[(1, 32), "int32"], placeholder_red: T.Buffer[1, "int32"]
) -> None:
    # Expected result of splitting and rfactoring the TOPI argmin reduction;
    # mirror of argmax_topi_rfactor with < and +INT32_MAX init values.
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
    placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
    for i0, i1_0, i1_1 in T.grid(1, 4, 8):
        with T.block("placeholder_red_temp_rf"):
            vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
            T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            with T.init():
                placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
                placeholder_red_temp_v1_rf[ax0, vi1_1] = 2147483647
            v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1]
                or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
                and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
                placeholder_red_temp_v0_rf[ax0, vi1_1],
                vi1_0 * 8 + vi1_1,
            )
            v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder[ax0, vi1_0 * 8 + vi1_1],
            )
            placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
            placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
    for i0, i1_1 in T.grid(1, 8):
        with T.block("placeholder_red_temp"):
            vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
            T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
            with T.init():
                placeholder_red_temp_v0[ax0] = -1
                placeholder_red_temp_v1[ax0] = 2147483647
            v_placeholder_red_temp_v0: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1]
                or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
                and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
                placeholder_red_temp_v0[ax0],
                placeholder_red_temp_v0_rf[ax0, vi1_1],
            )
            v_placeholder_red_temp_v1: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder_red_temp_v1[ax0],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
            )
            placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
            placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
    for i0 in T.serial(1):
        with T.block("placeholder_red"):
            ax0 = T.axis.spatial(1, i0)
            T.reads(placeholder_red_temp_v0[ax0])
            T.writes(placeholder_red[ax0])
            placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
def test_reduction_rfactor_matmul():
    # rfactor the innermost reduction loop of the transformed matmul.
    s = tir.Schedule(transformed_matmul, debug_mask="all")
    update = s.get_block("update")
    _, _, _, _, kii = s.get_loops(update)
    rf_block = s.rfactor(kii, 0)
    tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
    assert s.get(update).same_as(s.get(s.get_block("update")))
    verify_trace_roundtrip(s, mod=transformed_matmul)


def test_reduction_rfactor_matmul_with_let():
    # Same as above, but the matmul update body is wrapped in LetStmt bindings.
    s = tir.Schedule(transformed_matmul_with_let, debug_mask="all")
    update = s.get_block("update")
    _, _, _, _, kii = s.get_loops(update)
    rf_block = s.rfactor(kii, 0)
    tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
    assert s.get(update).same_as(s.get(s.get_block("update")))
    verify_trace_roundtrip(s, mod=transformed_matmul_with_let)
def test_reduction_rfactor_square_sum():
    """rfactor the reduction loop of square_sum with factor_axis=1."""
    sch = tir.Schedule(square_sum, debug_mask="all")
    blk = sch.get_block("C")
    j = sch.get_loops(blk)[-1]
    rf = sch.rfactor(j, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], square_sum_rfactor)
    assert sch.get(rf).same_as(sch.get(sch.get_block("C_rf")))
    assert sch.get(blk).same_as(sch.get(sch.get_block("C")))
    verify_trace_roundtrip(sch, mod=square_sum)


def test_reduction_rfactor_square_sum_square_root():
    """rfactor on the fused-then-split square-sum-square-root workload."""
    sch = tir.Schedule(transformed_square_sum_square_root, debug_mask="all")
    blk = sch.get_block("C")
    f_i = sch.get_loops(blk)[-1]
    rf = sch.rfactor(f_i, 0)
    tvm.ir.assert_structural_equal(sch.mod["main"], square_sum_square_root_rfactor)
    assert sch.get(rf).same_as(sch.get(sch.get_block("C_rf")))
    assert sch.get(blk).same_as(sch.get(sch.get_block("C")))
    verify_trace_roundtrip(sch, mod=transformed_square_sum_square_root)
def test_reduction_rfactor_loop_multiple_children():
    # The candidate loop has multiple child blocks, so rfactor must fail.
    s = tir.Schedule(matmul_loop_multiple_children, debug_mask="all")
    k, _, _ = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_not_stage_pipeline():
    # The schedule is not a stage pipeline, so rfactor must fail.
    s = tir.Schedule(matmul_not_stage_pipeline, debug_mask="all")
    _, _, k = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_not_reduction_block1():
    # The target block is elementwise, not a reduction, so rfactor must fail.
    s = tir.Schedule(element_wise, debug_mask="all")
    i, _ = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(i, 0)


def test_reduction_rfactor_not_reduction_block2():
    # Non-quasi-affine index binding disqualifies the reduction block.
    s = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_not_reduction_block3():
    # A non-dominant reduction block cannot be rfactored.
    s = tir.Schedule(rowsum_not_dominant, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_not_serial_loop():
    # rfactor requires the reduction loop to have serial kind.
    s = tir.Schedule(rowsum_not_serial, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_not_same_buffer_access():
    # Init and update access different buffer regions, so rfactor must fail.
    s = tir.Schedule(matmul_not_same_buffer_access, debug_mask="all")
    _, _, k = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_factor_axis_range_fail():
    """An out-of-range factor_axis (too large or too negative) is rejected."""
    sch = tir.Schedule(transformed_matmul, debug_mask="all")
    kii = sch.get_loops(sch.get_block("update"))[-1]
    for bad_axis in (3, -4):
        with pytest.raises(tvm.tir.ScheduleError):
            sch.rfactor(kii, bad_axis)


def test_reduction_rfactor_factor_axis_range():
    """A negative in-range factor_axis behaves like its positive counterpart."""
    sch = tir.Schedule(transformed_matmul, debug_mask="all")
    update = sch.get_block("update")
    kii = sch.get_loops(update)[-1]
    rf = sch.rfactor(kii, -3)
    tvm.ir.assert_structural_equal(sch.mod["main"], matmul_rfactor)
    assert sch.get(rf).same_as(sch.get(sch.get_block("update_rf")))
    assert sch.get(update).same_as(sch.get(sch.get_block("update")))
    verify_trace_roundtrip(sch, mod=transformed_matmul)
def test_reduction_rfactor_wrong_reduce_pattern1():
    # The update expression does not match a recognized reducer pattern.
    s = tir.Schedule(rowsum_wrong_reduce_pattern1, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_wrong_reduce_pattern2():
    # Another malformed reducer pattern that rfactor must reject.
    s = tir.Schedule(rowsum_wrong_reduce_pattern2, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_init_not_bufferstore():
    # The init statement is not a BufferStore, so rfactor must fail.
    s = tir.Schedule(rowsum_init_not_bufferstore, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)


def test_reduction_rfactor_wrong_loops1():
    # The chosen loop is a spatial loop, not the reduction loop.
    s = tir.Schedule(rowsum, debug_mask="all")
    i, _ = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(i, 0)


def test_reduction_rfactor_wrong_loops2():
    # The split reduction loop of the transformed rowsum is not eligible.
    s = tir.Schedule(rowsum_transformed, debug_mask="all")
    _, _, k_i = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k_i, 0)
def test_reduction_rfactor_zero_dim():
    # rfactor works on a zero-dimensional (scalar-output) reduction.
    s = tir.Schedule(rowsum_zero_dim, debug_mask="all")
    B = s.get_block("B")
    (k,) = s.get_loops(B)
    rf_block = s.rfactor(k, 0)
    tvm.ir.assert_structural_equal(s.mod["main"], rowsum_zero_dim_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("B_rf")))
    assert s.get(B).same_as(s.get(s.get_block("B")))
    verify_trace_roundtrip(s, mod=rowsum_zero_dim)


def test_reduction_rfactor_outermost_loop_multiple_children_fail():
    # Blocks D/E/F share their outer loops with sibling blocks, so rfactor
    # on any of their reduction loops must be rejected.
    s = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
    _, _, k2o, k2i = s.get_loops(s.get_block("D"))
    _, _, k3o, k3i = s.get_loops(s.get_block("E"))
    _, _, k4o, k4i = s.get_loops(s.get_block("F"))
    for loop in (k2o, k2i, k3o, k3i, k4o, k4i):
        with pytest.raises(tvm.tir.ScheduleError):
            s.rfactor(loop, 0)


def test_reduction_rfactor_outermost_loop_multiple_children():
    # Block C's outer reduction loop is eligible even though the outermost
    # loop nest has multiple children.
    s = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
    C = s.get_block("C")
    _, _, k1o, _ = s.get_loops(C)
    rf_block = s.rfactor(k1o, 2)
    tvm.ir.assert_structural_equal(s.mod["main"], multiple_reduction_blocks_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
    assert s.get(C).same_as(s.get(s.get_block("C")))
    verify_trace_roundtrip(s, mod=multiple_reduction_blocks)
def test_reduction_rfactor_predicate():
    # rfactor on a loop whose block carries a predicate must be rejected.
    s = tir.Schedule(rowsum_predicate, debug_mask="all")
    B = s.get_block("B")
    _, ko, _ = s.get_loops(B)
    with pytest.raises(tvm.TVMError):
        # Result intentionally discarded: the call is expected to raise.
        s.rfactor(ko, 1)


def test_reduction_rfactor_with_annotation():
    # Block annotations are preserved through rfactor.
    s = tir.Schedule(square_sum_with_annotation, debug_mask="all")
    C = s.get_block("C")
    _, _, j = s.get_loops(C)
    rf_block = s.rfactor(j, 1)
    tvm.ir.assert_structural_equal(s.mod["main"], square_sum_with_annotation_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("C_rf")))
    assert s.get(C).same_as(s.get(s.get_block("C")))
    verify_trace_roundtrip(s, mod=square_sum_with_annotation)


def test_reduction_rfactor_spatial_only():
    # rfactor on a loop that only carries spatial iteration.
    s = tir.Schedule(rfactor_spatial_only, debug_mask="all")
    block = s.get_block(name="acc", func_name="main")
    _, _, _, _, loop, _ = s.get_loops(block)
    rf_block = s.rfactor(loop=loop, factor_axis=4)
    tvm.ir.assert_structural_equal(s.mod["main"], rfactor_spatial_only_after)
    assert s.get(rf_block).same_as(s.get(s.get_block("acc_rf")))
    assert s.get(block).same_as(s.get(s.get_block("acc")))
    verify_trace_roundtrip(s, mod=rfactor_spatial_only)
def test_reduction_rfactor_argmax():
    # rfactor supports multi-buffer (tuple) reductions such as argmax.
    s = tir.Schedule(argmax_split, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    rf_block = s.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(s.mod["main"], argmax_split_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("argmax_rf")))
    assert s.get(argmax).same_as(s.get(s.get_block("argmax")))
    verify_trace_roundtrip(s, mod=argmax_split)


def test_reduction_rfactor_argmin_init_update_reordeded():
    # NOTE(review): "reordeded" is a long-standing typo in the test name; kept
    # to avoid changing the public test identifier.
    s = tir.Schedule(argmin_split_init_update_reordered, debug_mask="all")
    argmin = s.get_block("argmin")
    _, _, ki = s.get_loops(argmin)
    rf_block = s.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(s.mod["main"], argmin_split_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("argmin_rf")))
    assert s.get(argmin).same_as(s.get(s.get_block("argmin")))
    verify_trace_roundtrip(s, mod=argmin_split_init_update_reordered)
def test_reduction_rfactor_argmax_reduction_buffer_different_shape():
    """Reduction buffers of mismatched shapes cannot be rfactored."""
    sch = tir.Schedule(argmax_split_different_shape, debug_mask="all")
    ki = sch.get_loops(sch.get_block("argmax"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rfactor(ki, 1)


def test_reduction_rfactor_argmax_different_access_indices():
    """Reduction buffers accessed with different indices are rejected."""
    sch = tir.Schedule(argmax_split_different_indices, debug_mask="all")
    ki = sch.get_loops(sch.get_block("argmax"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rfactor(ki, 1)


def test_reduction_rfactor_argmax_init_not_bufferstore():
    """A non-BufferStore init statement is rejected."""
    sch = tir.Schedule(argmax_split_init_not_bufferstore, debug_mask="all")
    ki = sch.get_loops(sch.get_block("argmax"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rfactor(ki, 1)


def test_reduction_rfactor_argmax_init_buffer_duplicate():
    """Duplicate buffers in the init block are rejected."""
    sch = tir.Schedule(argmax_split_init_buffer_duplicate, debug_mask="all")
    ki = sch.get_loops(sch.get_block("argmax"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rfactor(ki, 1)


def test_reduction_rfactor_argmax_letstmt_fewer_than_init():
    """Fewer LetStmt bindings than init stores is rejected."""
    sch = tir.Schedule(argmax_split_letstmt_fewer_than_init, debug_mask="all")
    ki = sch.get_loops(sch.get_block("argmax"))[-1]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rfactor(ki, 1)
def test_reduction_rfactor_argmax_letstmt_more_than_init():
    # More LetStmt bindings than init stores must be rejected.
    s = tir.Schedule(argmax_split_letstmt_more_than_init, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_let_body_neither_seqstmt_nor_bufferstore():
    # A let body that is neither a SeqStmt nor a BufferStore must be rejected.
    s = tir.Schedule(argmax_split_let_body_neither_seqstmt_nor_bufferstore, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_init_update_inconsistent_bufferstore_number():
    # Mismatched init/update BufferStore counts must be rejected.
    s = tir.Schedule(argmax_split_init_update_inconsistent_bufferstore_number, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_body_seq_not_bufferstore():
    # A SeqStmt element that is not a BufferStore must be rejected.
    s = tir.Schedule(argmax_split_body_seq_not_bufferstore, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_body_bufferstore_value_not_var():
    # A stored value that is not a let-bound variable must be rejected.
    s = tir.Schedule(argmax_split_body_bufferstore_value_not_var, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_body_bufferstore_value_unbound_var():
    # A stored variable with no enclosing LetStmt binding must be rejected.
    s = tir.Schedule(argmax_split_body_bufferstore_value_unbound_var, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_one_let_var_used_multi_times():
    # One let variable stored into multiple buffers must be rejected.
    s = tir.Schedule(argmax_split_one_let_var_used_multi_times, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_body_one_buffer_updated_multi_times():
    # One buffer stored multiple times in the update body must be rejected.
    s = tir.Schedule(argmax_split_body_one_buffer_updated_multi_times, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)


def test_reduction_rfactor_argmax_init_buffer_not_match():
    # Init writing a different buffer than the update must be rejected.
    s = tir.Schedule(argmax_split_init_buffer_not_match, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_topi_argmax():
    """Build TOPI argmax, split its reduction loop by 8, then rfactor."""
    data = te.placeholder((1, 32), dtype="int32")
    argmax_topi = te.create_prim_func([data, topi.argmax(data, axis=1)])
    sch = tir.Schedule(argmax_topi, debug_mask="all")
    blk = sch.get_block("placeholder_red_temp")
    k = sch.get_loops(blk)[-1]
    _, ki = sch.split(k, [None, 8])
    rf = sch.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], argmax_topi_rfactor)
    assert sch.get(rf).same_as(sch.get(sch.get_block("placeholder_red_temp_rf")))
    assert sch.get(blk).same_as(sch.get(sch.get_block("placeholder_red_temp")))
    verify_trace_roundtrip(sch, mod=argmax_topi)


def test_reduction_rfactor_topi_argmin():
    """Build TOPI argmin, split its reduction loop by 8, then rfactor."""
    data = te.placeholder((1, 32), dtype="int32")
    argmin_topi = te.create_prim_func([data, topi.argmin(data, axis=1)])
    sch = tir.Schedule(argmin_topi, debug_mask="all")
    blk = sch.get_block("placeholder_red_temp")
    k = sch.get_loops(blk)[-1]
    _, ki = sch.split(k, [None, 8])
    rf = sch.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], argmin_topi_rfactor)
    assert sch.get(rf).same_as(sch.get(sch.get_block("placeholder_red_temp_rf")))
    assert sch.get(blk).same_as(sch.get(sch.get_block("placeholder_red_temp")))
    verify_trace_roundtrip(sch, mod=argmin_topi)
# Script entry point: run all tests in this file through TVM's test runner.
if __name__ == "__main__":
    tvm.testing.main()
# Imports for the rolling-buffer schedule tests (stdlib / third-party / TVM).
import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
def check_rolling_buffer(
    sch: tir.Schedule, origin: tir.PrimFunc, expected: tir.PrimFunc, check_run=False
):
    """Check a rolling-buffer schedule structurally and, optionally, numerically.

    Asserts the scheduled PrimFunc is structurally equal to *expected* and the
    schedule trace round-trips; with check_run=True also builds both versions
    and compares their outputs on random input.
    """
    scheduled = sch.mod["main"]
    tvm.ir.assert_structural_equal(scheduled, expected)
    verify_trace_roundtrip(sch, origin)
    if not check_run:
        return
    in_buf = origin.buffer_map[origin.params[0]]
    out_buf = origin.buffer_map[origin.params[1]]
    in_shape = [int(dim) for dim in in_buf.shape]
    out_shape = [int(dim) for dim in out_buf.shape]
    data = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buf.dtype))
    out_origin = tvm.nd.array(np.zeros(out_shape).astype(out_buf.dtype))
    out_sched = tvm.nd.array(np.zeros(out_shape).astype(out_buf.dtype))
    tvm.build(origin)(data, out_origin)
    tvm.build(scheduled)(data, out_sched)
    tvm.testing.assert_allclose(out_origin.numpy(), out_sched.numpy())
def _tile_nd(s, tile, block_name):
    """Tile the leading loops of *block_name* by *tile* sizes.

    Splits loop i by tile[i], reorders all outer loops before all inner
    loops, and returns the (outer_loops, inner_loops) lists.
    """
    outer_indices, inner_indices = [], []
    loops = s.get_loops(s.get_block(block_name))
    for loop, size in zip(loops, tile):
        outer, inner = s.split(loop, [None, size])
        outer_indices.append(outer)
        inner_indices.append(inner)
    s.reorder(*outer_indices, *inner_indices)
    return outer_indices, inner_indices
def test_1d_rolling_buffer():
    # Cascade of two 1-D window reductions A -> B -> C; after compute_at plus
    # rolling_buffer, the intermediate B shrinks to 6 elements and is
    # addressed modulo 6.
    @T.prim_func
    def before(A: T.Buffer[(4, 12), "int32"], C: T.Buffer[(4, 8), "int32"]):
        B = T.alloc_buffer((4, 10), "int32")
        for c in T.serial(4):
            for i in T.serial(0, 10):
                for k in T.serial(3):
                    with T.block("B"):
                        cc, vi, vk = T.axis.remap("SSR", [c, i, k])
                        with T.init():
                            B[cc, vi] = 0
                        B[cc, vi] = B[cc, vi] + A[cc, vi + vk]
            for i in T.serial(0, 8):
                for k in T.serial(3):
                    with T.block("C"):
                        cc, vi, vk = T.axis.remap("SSR", [c, i, k])
                        with T.init():
                            C[cc, vi] = 0
                        C[cc, vi] = C[cc, vi] + B[cc, vi + vk]

    @T.prim_func
    def expected(A: T.Buffer[(4, 12), "int32"], C: T.Buffer[(4, 8), "int32"]):
        B = T.alloc_buffer([4, 6], dtype="int32")
        for c, i_0 in T.grid(4, 2):
            for ax0, ax1 in T.grid(6, 3):
                with T.block("B"):
                    T.where(i_0 < 1 or 2 <= ax0)
                    cc = T.axis.spatial(4, c)
                    vi = T.axis.opaque(10, i_0 * 4 + ax0)
                    vk = T.axis.reduce(3, ax1)
                    T.reads(A[cc, vi + vk])
                    T.writes(B[cc, vi % 6])
                    with T.init():
                        B[cc, vi % 6] = 0
                    B[cc, vi % 6] = B[cc, vi % 6] + A[cc, vi + vk]
            for i_1, k in T.grid(4, 3):
                with T.block("C"):
                    cc = T.axis.spatial(4, c)
                    vi = T.axis.opaque(8, i_0 * 4 + i_1)
                    vk = T.axis.reduce(3, k)
                    T.reads(B[cc, (vi + vk) % 6])
                    T.writes(C[cc, vi])
                    with T.init():
                        C[cc, vi] = 0
                    C[cc, vi] = C[cc, vi] + B[cc, (vi + vk) % 6]

    sch = tir.Schedule(before, debug_mask="all")
    _, i, _ = sch.get_loops(sch.get_block("C"))
    io, _ = sch.split(i, [2, 4])
    sch.compute_at(sch.get_block("B"), io)
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, before, expected, check_run=True)
@T.prim_func
def cascade_2_max_pool2d(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
    # Two chained 3x3 NHWC max-pools: A -> B -> C.
    B = T.alloc_buffer([1, 10, 10, 16], dtype="int8")
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3):
        with T.block("B"):
            ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
            with T.init():
                B[ax0, ax1, ax2, ax3] = T.int8(-128)
            B[ax0, ax1, ax2, ax3] = T.max(B[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3])
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3):
        with T.block("C"):
            ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
            with T.init():
                C[ax0, ax1, ax2, ax3] = T.int8(-128)
            C[ax0, ax1, ax2, ax3] = T.max(C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3])
@T.prim_func
def cascade_3_max_pool2d_with_stride(
    A: T.Buffer[(1, 24, 24, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]
):
    # Three chained 3x3 NHWC max-pools A -> B_0 -> B_1 -> C; the middle stage
    # reads B_0 with stride 2 (ax1 * 2 + rv0, ax2 * 2 + rv1).
    B_0 = T.alloc_buffer([1, 22, 22, 16], dtype="int8")
    B_1 = T.alloc_buffer([1, 10, 10, 16], dtype="int8")
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 22, 22, 16, 3, 3):
        with T.block("B_0"):
            ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
            with T.init():
                B_0[ax0, ax1, ax2, ax3] = T.int8(-128)
            B_0[ax0, ax1, ax2, ax3] = T.max(
                B_0[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3]
            )
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3):
        with T.block("B_1"):
            ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
            with T.init():
                B_1[ax0, ax1, ax2, ax3] = T.int8(-128)
            B_1[ax0, ax1, ax2, ax3] = T.max(
                B_1[ax0, ax1, ax2, ax3], B_0[ax0, ax1 * 2 + rv0, ax2 * 2 + rv1, ax3]
            )
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3):
        with T.block("C"):
            ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
            with T.init():
                C[ax0, ax1, ax2, ax3] = T.int8(-128)
            C[ax0, ax1, ax2, ax3] = T.max(
                C[ax0, ax1, ax2, ax3], B_1[ax0, ax1 + rv0, ax2 + rv1, ax3]
            )
def test_cascade_max_pool2d_w_tiled():
    """Tile C along W, compute B at the innermost tile loop, roll B on axis 0.

    The alloc line of `expected` appears truncated in the damaged original
    (possibly missing its closing parenthesis); this body restores it.
    """

    @T.prim_func
    def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
        # Rolling buffer: B's W extent shrinks from 10 to 6 and is indexed mod 6.
        B = T.alloc_buffer([1, 10, 6, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 2, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(10, 6, 16, 3, 3):
                with T.block("B"):
                    # Only recompute the rows that were not produced by the previous tile.
                    T.where(i2_0 < 1 or 2 <= ax1)
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.spatial(10, ax0)
                    ax2_1 = T.axis.opaque(10, i2_0 * 4 + ax1)
                    ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1, ax2_1 % 6, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.max(
                        B[ax0_1, ax1_1, ax2_1 % 6, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 8, 4, 16, 3, 3):
                with T.block("C"):
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.spatial(8, i1_0 * 8 + i1_1)
                    ax2 = T.axis.opaque(8, i2_0 * 4 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3]
                    )

    sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    oi, _ = _tile_nd(sch, [1, 8, 4, 16], "C")
    sch.compute_at(sch.get_block("B"), oi[-1])
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_h_tiled():
    """Tile C along H, compute B at the innermost tile loop, roll B on axis 0.

    Fixes extraction damage in the original: the `16` in the Buffer annotation
    and the `check_run=True` keyword were each split across two lines.
    """

    @T.prim_func
    def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
        # Rolling buffer: B's H extent shrinks from 10 to 6 and is indexed mod 6.
        B = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 1, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 10, 16, 3, 3):
                with T.block("B"):
                    # Skip rows already produced by the previous H tile.
                    T.where(i1_0 < 1 or 2 <= ax0)
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0)
                    ax2_1 = T.axis.spatial(10, ax1)
                    ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max(
                        B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 8, 16, 3, 3):
                with T.block("C"):
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1)
                    ax2 = T.axis.spatial(8, i2_0 * 8 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]
                    )

    sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    io, _ = _tile_nd(sch, [1, 4, 8, 16], "C")
    sch.compute_at(sch.get_block("B"), io[-1])
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_h_w_c_tiled():
    """Tile C along H, W and C, compute B at the innermost loop, roll B.

    Fixes extraction damage: the `io, _ = _tile_nd(...)` assignment was split
    across two lines without a continuation, which is a syntax error.
    """

    @T.prim_func
    def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]):
        B = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 2):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 8, 3, 3):
                with T.block("B"):
                    # Skip the H and W rows already produced by previous tiles.
                    T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1))
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0)
                    ax2_1 = T.axis.spatial(10, i2_0 * 4 + ax1)
                    ax3_1 = T.axis.spatial(16, i3_0 * 8 + ax2)
                    rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max(
                        B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 8, 3, 3):
                with T.block("C"):
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1)
                    ax2 = T.axis.spatial(8, i2_0 * 4 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 8 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]
                    )

    sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    io, _ = _tile_nd(sch, [1, 4, 4, 8], "C")
    sch.compute_at(sch.get_block("B"), io[-1])
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_max_pool2d_non_perfect_tiled():
    """Non-perfect (6x6) tiling of C: T.where guards both overrun and overlap.

    Fixes extraction damage: `B[ax0, (ax1 + rv0) % 8, ...]` in block "C" was
    split mid-expression across two lines.
    """

    @T.prim_func
    def expected(A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]) -> None:
        B = T.alloc_buffer([1, 8, 10, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(8, 8, 16, 3, 3):
                with T.block("B"):
                    # First two conjuncts clip the non-perfect tile to the buffer
                    # extent; last two skip rows produced by the previous tiles.
                    T.where(
                        i1_0 * 6 + ax0 < 10
                        and i2_0 * 6 + ax1 < 10
                        and (i1_0 < 1 or 2 <= ax0)
                        and (i2_0 < 1 or 2 <= ax1)
                    )
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.opaque(10, i1_0 * 6 + ax0)
                    ax2_1 = T.axis.spatial(10, i2_0 * 6 + ax1)
                    ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1 % 8, ax2_1, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.max(
                        B[ax0_1, ax1_1 % 8, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 6, 6, 16, 3, 3):
                with T.block("C"):
                    T.where(i1_0 * 6 + i1_1 < 8 and i2_0 * 6 + i2_1 < 8)
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.opaque(8, i1_0 * 6 + i1_1)
                    ax2 = T.axis.spatial(8, i2_0 * 6 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3]
                    )

    sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    io, _ = _tile_nd(sch, [1, 6, 6, 16], "C")
    sch.compute_at(sch.get_block("B"), io[-1])
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True)
def test_cascade_3_max_pool2d_with_stride():
    # Roll both intermediate buffers of the 3-pool cascade; B_0 rolls mod 13
    # (stride-2 consumer needs a deeper window) and B_1 rolls mod 6.
    @T.prim_func
    def expected(A: T.Buffer[(1, 24, 24, 16), "int8"], C: T.Buffer[(1, 8, 8, 16), "int8"]) -> None:
        B_0 = T.alloc_buffer([1, 13, 22, 16], dtype="int8")
        B_1 = T.alloc_buffer([1, 6, 10, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(13, 13, 16, 3, 3):
                with T.block("B_0"):
                    # Skip the 5 rows/cols already produced by the previous tile.
                    T.where((i1_0 < 1 or 5 <= ax0) and (i2_0 < 1 or 5 <= ax1))
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.opaque(22, i1_0 * 8 + ax0)
                    ax2_1 = T.axis.spatial(22, i2_0 * 8 + ax1)
                    ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1])
                    with T.init():
                        B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.int8(-128)
                    B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.max(
                        B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1],
                        A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1],
                    )
            for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 16, 3, 3):
                with T.block("B_1"):
                    T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1))
                    ax0_2 = T.axis.spatial(1, 0)
                    ax1_2 = T.axis.opaque(10, i1_0 * 4 + ax0)
                    ax2_2 = T.axis.spatial(10, i2_0 * 4 + ax1)
                    ax3_2, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4])
                    T.reads(B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2])
                    T.writes(B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2])
                    with T.init():
                        B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.int8(-128)
                    B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.max(
                        B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2],
                        B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2],
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 3, 3):
                with T.block("C"):
                    ax0_3 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1_3 = T.axis.opaque(8, i1_0 * 4 + i1_1)
                    ax2_3 = T.axis.spatial(8, i2_0 * 4 + i2_1)
                    ax3_3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3])
                    T.writes(C[ax0_3, ax1_3, ax2_3, ax3_3])
                    with T.init():
                        C[ax0_3, ax1_3, ax2_3, ax3_3] = T.int8(-128)
                    C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max(
                        C[ax0_3, ax1_3, ax2_3, ax3_3],
                        B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3],
                    )

    sch = tir.Schedule(cascade_3_max_pool2d_with_stride, debug_mask="all")
    io, _ = _tile_nd(sch, [1, 4, 4, 16], "C")
    sch.compute_at(sch.get_block("B_1"), io[-1])
    sch.compute_at(sch.get_block("B_0"), io[-1])
    sch.rolling_buffer(sch.get_block("B_0"), 0)
    sch.rolling_buffer(sch.get_block("B_1"), 0)
    check_rolling_buffer(sch, cascade_3_max_pool2d_with_stride, expected, check_run=True)
def test_upscale():
    """Rolling buffer on an upscaling cascade (output larger than input).

    The damaged original had every expression containing a `//` token
    truncated at that token; the `i1_0 * 5 // 2 + ax0`-style indices below
    are reconstructed from the upstream TVM rolling_buffer test — verify
    against the canonical source before relying on exact values.
    """

    @T.prim_func
    def before(A: T.Buffer[(1, 16, 16, 16), "int8"], C: T.Buffer[(1, 24, 24, 16), "int8"]) -> None:
        B = T.alloc_buffer([1, 14, 14, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3):
                with T.block("B"):
                    T.where(i1_0 * 5 // 2 + ax0 < 14 and i2_0 * 5 // 2 + ax1 < 14)
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.spatial(14, i1_0 * 5 // 2 + ax0)
                    ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1)
                    ax3_1 = T.axis.spatial(16, ax2)
                    rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1, ax2_1, ax3_1] = T.max(
                        B[ax0_1, ax1_1, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3):
                with T.block("C"):
                    T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24)
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.spatial(24, i1_0 * 5 + i1_1)
                    ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3]
                    )

    @T.prim_func
    def expected(
        A: T.Buffer[(1, 16, 16, 16), "int8"], C: T.Buffer[(1, 24, 24, 16), "int8"]
    ) -> None:
        # Rolling buffer: B's H extent shrinks from 14 to 5 and is indexed mod 5.
        B = T.alloc_buffer([1, 5, 14, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1):
            for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3):
                with T.block("B"):
                    T.where(
                        i1_0 * 5 // 2 + ax0 < 14
                        and i2_0 * 5 // 2 + ax1 < 14
                        and (i1_0 < 1 or 2 <= ax0)
                        and (i2_0 < 1 or 2 <= ax1)
                    )
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.opaque(14, i1_0 * 5 // 2 + ax0)
                    ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1)
                    ax3_1 = T.axis.spatial(16, ax2)
                    rv0, rv1 = T.axis.remap("RR", [ax3, ax4])
                    T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1])
                    T.writes(B[ax0_1, ax1_1 % 5, ax2_1, ax3_1])
                    with T.init():
                        B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.max(
                        B[ax0_1, ax1_1 % 5, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]
                    )
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3):
                with T.block("C"):
                    T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24)
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.opaque(24, i1_0 * 5 + i1_1)
                    ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3]
                    )

    sch = tir.Schedule(before, debug_mask="all")
    sch.rolling_buffer(sch.get_block("B"), 0)
    check_rolling_buffer(sch, before, expected, check_run=True)
def test_fail_rolling_buffer_multi_writers():
    # rolling_buffer must reject a buffer (B) that is written by more than
    # one block ("B_writer_0" and "B_writer_1").
    @T.prim_func
    def func_multi_writers(
        A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 12, 12, 16), "int8"]
    ):
        B = T.alloc_buffer([1, 12, 12, 16], dtype="int8")
        for i0, i1, i2, i3 in T.grid(1, 3, 3, 1):
            for ax0, ax1, ax2 in T.grid(6, 6, 16):
                with T.block("B_writer_0"):
                    ax0_1 = T.axis.spatial(1, i0)
                    ax1_1 = T.axis.spatial(12, i1 * 4 + ax0)
                    ax2_1 = T.axis.spatial(12, i2 * 4 + ax1)
                    ax3_1 = T.axis.spatial(16, ax2)
                    with T.init():
                        B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128)
                    B[ax0_1, ax1_1, ax2_1, ax3_1] = A[ax0_1, ax1_1, ax2_1, ax3_1] + T.int8(1)
            for ax0, ax1, ax2 in T.grid(6, 6, 16):
                with T.block("B_writer_1"):
                    ax0_2 = T.axis.spatial(1, i0)
                    ax1_2 = T.axis.spatial(12, i1 * 4 + ax0)
                    ax2_2 = T.axis.spatial(12, i2 * 4 + ax1)
                    ax3_2 = T.axis.spatial(16, ax2)
                    with T.init():
                        B[ax0_2, ax1_2, ax2_2, ax3_2] = T.int8(-128)
                    B[ax0_2, ax1_2, ax2_2, ax3_2] = B[ax0_2, ax1_2, ax2_2, ax3_2] + A[
                        ax0_2, ax1_2, ax2_2, ax3_2
                    ] * T.int8(2)
            for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 4, 4, 16, 3, 3):
                with T.block("C"):
                    ax0_3 = T.axis.spatial(1, i0 + ax0)
                    ax1_3 = T.axis.spatial(12, i1 * 4 + ax1)
                    ax2_3 = T.axis.spatial(12, i2 * 4 + ax2)
                    ax3_3 = T.axis.spatial(16, i3 * 16 + ax3)
                    rv0, rv1 = T.axis.remap("RR", [ax4, ax5])
                    with T.init():
                        C[ax0_3, ax1_3, ax2_3, ax3_3] = T.int8(-128)
                    C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max(
                        C[ax0_3, ax1_3, ax2_3, ax3_3],
                        B[ax0_3, ax1_3 + rv0, ax2_3 + rv1, ax3_3]
                    )

    sch = tir.Schedule(func_multi_writers, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rolling_buffer(sch.get_block("B_writer_0"), 0)
def test_fail_rolling_buffer_not_match():
    """rolling_buffer rejects producer/consumer tiles that do not overlap.

    Fixes extraction damage: the final `sch.rolling_buffer(sch.get_block(...))`
    call was split mid-identifier across two lines.
    """

    @T.prim_func
    def func_non_overlap(
        A: T.Buffer[(1, 12, 12, 16), "int8"], C: T.Buffer[(1, 12, 12, 16), "int8"]
    ):
        B = T.alloc_buffer([1, 12, 12, 16], dtype="int8")
        for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 3, 3, 1):
            # B tiles are exactly 4x4 and the consumer window is 1x1 (rv
            # extents are 1), so consecutive tiles share no data to roll.
            for ax0, ax1, ax2 in T.grid(4, 4, 16):
                with T.block("B"):
                    ax0_1 = T.axis.spatial(1, 0)
                    ax1_1 = T.axis.spatial(12, i1_0 * 4 + ax0)
                    ax2_1 = T.axis.spatial(12, i2_0 * 4 + ax1)
                    ax3 = T.axis.spatial(16, ax2)
                    T.reads(A[ax0_1, ax1_1, ax2_1, ax3])
                    T.writes(B[ax0_1, ax1_1, ax2_1, ax3])
                    with T.init():
                        B[ax0_1, ax1_1, ax2_1, ax3] = T.int8(-128)
                    B[ax0_1, ax1_1, ax2_1, ax3] = A[ax0_1, ax1_1, ax2_1, ax3]
            for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 1, 1):
                with T.block("C"):
                    ax0 = T.axis.spatial(1, i0_0 + i0_1)
                    ax1 = T.axis.spatial(12, i1_0 * 4 + i1_1)
                    ax2 = T.axis.spatial(12, i2_0 * 4 + i2_1)
                    ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1)
                    rv0, rv1 = T.axis.remap("RR", [i4, i5])
                    T.reads(B[ax0, ax1 + rv0, ax2 + rv1, ax3])
                    T.writes(C[ax0, ax1, ax2, ax3])
                    with T.init():
                        C[ax0, ax1, ax2, ax3] = T.int8(-128)
                    C[ax0, ax1, ax2, ax3] = T.max(
                        C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3]
                    )

    sch = tir.Schedule(func_non_overlap, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rolling_buffer(sch.get_block("B"), 0)
def test_fail_rolling_buffer_injection_invalid():
    """rolling_buffer is rejected when producer and consumer are tiled independently."""
    sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all")
    # Tiling both blocks separately (instead of compute_at-ing B under C's
    # tiles) leaves no valid rolling axis.
    _tile_nd(sch, [1, 4, 8, 16], "C")
    _tile_nd(sch, [1, 4, 8, 16], "B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.rolling_buffer(sch.get_block("B"), 0)
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
from collections |
import defaultdict |
import sys |
import numpy |
import pytest |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    # Fixture for the sampling tests: B = A * 2 over a (128, 257, 1470) grid.
    # The loop extents 128/257/1470 exercise power-of-two, prime, and
    # composite extents in the sample_perfect_tile tests below.
    A = T.match_buffer(a, (128, 257, 1470))
    B = T.match_buffer(b, (128, 257, 1470))
    for i, j, k in T.grid(128, 257, 1470):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def tiled_conv2d_with_padding(
    inputs: T.Buffer[(1, 224, 224, 3), "float32"],
    weight: T.Buffer[(7, 7, 3, 64), "float32"],
    conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
    # Pre-tiled NHWC conv2d with explicit padding; used as the workload for
    # sample_compute_location tests.
    # NOTE(review): the channel index of PadInput was truncated at the `//`
    # token in the damaged original; `co // 64 * 3 + rc` is reconstructed from
    # the upstream TVM sampling test — confirm against the canonical source.
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            # Zero-pad a 3-pixel border around the 224x224 input.
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    for (
        i0_0,
        i1_0,
        i2_0,
        i3_0,
        i0_1_1,
        i1_1_1,
        i2_1_1,
        i3_1_1,
        i4_0,
        i5_0,
        i6_0,
        i0_2,
        i1_2,
        i2_2,
        i3_2,
        i4_1,
        i5_1,
        i6_1,
        i0_3,
        i1_3,
        i2_3,
        i3_3,
    ) in T.grid(1, 1, 4, 1, 1, 2, 4, 1, 7, 7, 1, 1, 1, 1, 1, 1, 1, 3, 1, 56, 7, 64):
        with T.block("conv2d_nhwc"):
            n = T.axis.spatial(1, 0)
            h = T.axis.spatial(112, i1_1_1 * 56 + i1_3)
            w = T.axis.spatial(112, i2_0 * 28 + i2_1_1 * 7 + i2_3)
            co, rh, rw, rc = T.axis.remap("SRRR", [i3_3, i4_0, i5_0, i6_1])
            T.reads(
                conv2d_nhwc[n, h, w, co],
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight[rh, rw, rc, co],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
            )
def test_sample_categorical():
    """Sampled categorical decisions follow the given probabilities (+-7%)."""
    num_samples = 1000
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    histogram = defaultdict(int)
    candidates = [5, 2, 7, 1]
    probs = [0.15, 0.55, 0.05, 0.25]
    for _ in range(num_samples):
        histogram[sch.get(sch.sample_categorical(candidates, probs))] += 1
    for cand, prob in zip(candidates, probs):
        # Empirical frequency must stay within an absolute 0.07 tolerance.
        assert (prob - 0.07) * num_samples <= histogram[cand] <= (prob + 0.07) * num_samples
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_categorical_copy():
    """Sampled decisions survive a schedule copy unchanged."""
    trials = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [1, 2, 3, 4]
    probs = [0.1, 0.2, 0.3, 0.4]
    recorded = []
    for _ in range(trials):
        rv = sch.sample_categorical(candidates, probs)
        recorded.append((rv, sch.get(rv)))
    duplicate = sch.copy()
    # Every random variable must resolve to the same decision on the copy.
    for rv, decision in recorded:
        assert int(decision) == int(duplicate.get(rv))
def test_sample_categorical_serialize():
    """Sampled decisions survive trace serialization/round-trip."""
    trials = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 6, 7, 8]
    probs = [0.23, 0.19, 0.37, 0.21]
    decisions = [sch.get(sch.sample_categorical(candidates, probs)) for _ in range(trials)]
    replayed = verify_trace_roundtrip(sch, mod=elementwise)
    # The replayed trace must record the same candidate index per instruction.
    for idx, inst in enumerate(replayed.trace.insts):
        assert decisions[idx] == candidates[replayed.trace.decisions[inst].value]
def test_sample_perfect_tile_power_of_two():
    """sample_perfect_tile factors a power-of-two extent (128) exactly.

    Fixes extraction damage: `elementwise` was split mid-identifier across
    two lines in the original.
    """
    sch = tir.Schedule(elementwise, debug_mask="all")
    i, _, _ = sch.get_loops(sch.get_block("B"))
    factors = sch.sample_perfect_tile(i, n=4)
    factors = [sch.get(i) for i in factors]
    prod = factors[0] * factors[1] * factors[2] * factors[3]
    assert prod == 128
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_prime():
    """sample_perfect_tile factors a prime extent (257) exactly."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, loop_j, _ = sch.get_loops(sch.get_block("B"))
    sampled = [sch.get(rv) for rv in sch.sample_perfect_tile(loop_j, n=4)]
    product = 1
    for factor in sampled:
        product *= factor
    assert product == 257
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_composite():
    """sample_perfect_tile factors a composite extent (1470) exactly."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, _, loop_k = sch.get_loops(sch.get_block("B"))
    sampled = [sch.get(rv) for rv in sch.sample_perfect_tile(loop_k, n=4)]
    product = 1
    for factor in sampled:
        product *= factor
    assert product == 1470
    verify_trace_roundtrip(sch, mod=elementwise)
# Parametrize tests over passing the block by name (sugared) vs. by object.
use_sugared_block = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_sample_compute_location(use_sugared_block):
    """sample_compute_location picks each candidate position roughly uniformly."""
    trials = 100
    sch = tir.Schedule(tiled_conv2d_with_padding, seed=42, debug_mask="all")
    pad_input = "PadInput" if use_sugared_block else sch.get_block("PadInput")
    tally = defaultdict(int)
    for _ in range(trials):
        _ = sch.sample_compute_location(pad_input)
        # The decision for the instruction just appended to the trace.
        tally[sch.trace.decisions[sch.trace.insts[-1]]] += 1
    n_candidates = 8
    expected_rate = 1.0 / n_candidates
    for count in tally.values():
        numpy.testing.assert_allclose(expected_rate, count / trials, atol=0.04)
def test_sample_perfect_tile_after_copy():
    """Sampling on a copied schedule must work (regression test).

    Fixes extraction damage: `sch_copy.get_loops(...)` was split
    mid-expression across two lines.
    """
    sch = tir.Schedule(elementwise, debug_mask="all")
    sch_copy = sch.copy()
    _, _, i = sch.get_loops(sch.get_block("B"))
    sch.sample_perfect_tile(i, n=4)
    # Loop RVs are per-schedule: re-query them on the copy before sampling.
    _, _, i = sch_copy.get_loops(sch_copy.get_block("B"))
    sch_copy.sample_perfect_tile(i, n=4)
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.tir |
import IndexMap
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Baseline fixture for the set_axis_separator tests: C = A * 2 + 1 via
    # an intermediate buffer B with no axis separators.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Expected result of set_axis_separator on element_wise: B gains an
    # axis separator after its first dimension.
    B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_set_axis_separator_input_buffer(A: T.Buffer(shape=(128, 128), dtype="float32", axis_separators=(1,)), C: T.Buffer[(128, 128), "float32"]) -> None:
    # Expected result when the axis separator is applied to the *input*
    # buffer A (declared in the signature) rather than the intermediate B.
    B = T.alloc_buffer([128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Variant of element_wise where both blocks access B through 0-d
    # match_buffer sub-regions; exercises sub-region rewriting.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            B_subregion0[()] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Expected result of set_axis_separator on element_wise_subregion_match:
    # the separator propagates into both match_buffer sub-regions.
    B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1])
            B_subregion0[()] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1])
            C[vi, vj] = B_subregion1[()] + T.float32(1)
# Parametrize over the three equivalent ways of introducing an axis separator:
# the dedicated schedule primitive, and transform_layout with the buffer
# given by name or by object.
argument_style = tvm.testing.parameter('set_axis_separators',
                             'transform_layout_named',
                             'transform_layout_buffer_object',
                             )
def test_set_axis_separator(argument_style):
    """All three spellings add an axis separator to B's write buffer."""
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("write", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='B',
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        write_buf = s.get(s.get_block('B')).writes[0].buffer
        s.transform_layout(block='B', buffer=write_buf,
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
    tvm.ir.assert_structural_equal(element_wise_set_axis_separator, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func)
def test_set_scope_fail_on_index_out_of_bound():
    """Out-of-range buffer indices passed to set_axis_separator raise.

    Fixes extraction damage: `element_wise` was split mid-identifier across
    two lines. NOTE(review): the test name says set_scope but the body
    exercises set_axis_separator — kept as-is to preserve the interface.
    """
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    with pytest.raises(AssertionError):
        s.set_axis_separator(s.get_block("B"), ("write", 1), [1])
    with pytest.raises(AssertionError):
        s.set_axis_separator(s.get_block("B"), ("read", -1), [1])
def test_set_axis_separator_input_buffer(argument_style):
    """All three spellings add an axis separator to the input buffer A."""
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("read", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='A',
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        read_buf = s.get(s.get_block('B')).reads[0].buffer
        s.transform_layout(block='B', buffer=read_buf,
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
    tvm.ir.assert_structural_equal(element_wise_set_axis_separator_input_buffer, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func)
def test_set_axis_separator_subregion(argument_style):
    """The separator must propagate into match_buffer sub-regions of B."""
    func = element_wise_subregion_match
    s = tir.Schedule(func, debug_mask='all')
    if argument_style == 'set_axis_separators':
        s.set_axis_separator(s.get_block("B"), ("write", 0), [1])
    elif argument_style == 'transform_layout_named':
        s.transform_layout(block='B', buffer='B',
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    elif argument_style == 'transform_layout_buffer_object':
        write_buf = s.get(s.get_block('B')).writes[0].buffer
        s.transform_layout(block='B', buffer=write_buf,
                           index_map=lambda i, j: [i, IndexMap.AXIS_SEPARATOR, j])
    else:
        raise ValueError(f'Unexpected argument_style: {argument_style}')
    tvm.ir.assert_structural_equal(element_wise_subregion_match_set_axis_separator, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func)
class TestIndexedLookup(tvm.testing.CompareBeforeAfter):
    # set_axis_separator on a buffer (B) that is used as an *index* into
    # another buffer: only B's declaration gains the separator.
    def transform(self):
        def func(mod):
            sch = tir.Schedule(mod)
            sch.set_axis_separator('block', 'B', [1])
            return sch.mod
        return func

    @T.prim_func
    def before():
        A = T.alloc_buffer([4,4], dtype="int32")
        B = T.alloc_buffer([1,1], dtype="int32")
        for j in T.serial(4):
            with T.block('block'):
                A[B[0,0],j] = 0

    @T.prim_func
    def expected():
        A = T.alloc_buffer([4,4], dtype="int32")
        # Only B's declaration changes; the indexed use in A stays the same.
        B = T.alloc_buffer([1,1], dtype="int32", axis_separators=[1])
        for j in T.serial(4):
            with T.block('block'):
                A[B[0,0],j] = 0
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.script |
import tir as T
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Baseline fixture for the set_scope tests: C = A * 2 + 1 via an
    # intermediate buffer B in the default (global) scope.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_scope(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Expected result of set_scope on element_wise: B is renamed B_shared
    # and allocated in "shared" scope.
    B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_shared[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B_shared[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Variant of element_wise where both blocks access B through 0-d
    # match_buffer sub-regions; exercises sub-region scope rewriting.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            B_subregion0[()] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_scope(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
    # Expected result of set_scope on element_wise_subregion_match: both the
    # allocation and the matched sub-regions move to "shared" scope.
    # (Rejoins a match_buffer call that was split mid-expression in the
    # damaged original.)
    B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
            B_subregion0_shared[()] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
            C[vi, vj] = B_subregion1_shared[()] + T.float32(1)
# Parametrize tests over passing the block by object vs. by name.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_set_scope(use_block_name):
    """set_scope moves write buffer 0 of block "B" into shared memory."""
    original = element_wise
    sch = tir.Schedule(original, debug_mask='all')
    target_block = 'B' if use_block_name else sch.get_block("B")
    sch.set_scope(target_block, 0, "shared")
    tvm.ir.assert_structural_equal(element_wise_set_scope, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=original)
def test_set_scope_fail_on_output_buffer(use_block_name):
    """Changing the scope of an output buffer (C) must raise."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    target_block = 'C' if use_block_name else sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.set_scope(target_block, 0, "shared")
def test_set_scope_fail_on_index_out_of_bound():
    """Buffer indices outside the block's write list must raise."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    for bad_index in (1, -1):
        with pytest.raises(tvm.tir.ScheduleError):
            sch.set_scope(sch.get_block("B"), bad_index, "shared")
def test_set_scope_fail_on_invalid_scope():
    """An unrecognized storage scope name must raise."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    with pytest.raises(tvm.tir.ScheduleError):
        sch.set_scope(sch.get_block("B"), 0, "test_scope")
def test_set_scope_subregion():
    """set_scope also rewrites match_buffer sub-regions of the target buffer."""
    original = element_wise_subregion_match
    sch = tir.Schedule(original, debug_mask='all')
    sch.set_scope(sch.get_block("B"), 0, "shared")
    tvm.ir.assert_structural_equal(element_wise_subregion_match_set_scope, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=original)
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
import pytest |
import tvm |
import tvm.testing
from tvm |
import te, tir
from tvm.script |
import tir as T
from tvm.tir.expr |
import IntImm
from tvm.tir.schedule.testing |
import verify_trace_roundtrip
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    # Baseline fixture: B = A * 2 over a 128^3 grid.
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_dependent_loops(a: T.handle, b: T.handle) -> None:
    # Fixture where the j loop's extent depends on the outer i loop.
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i in T.serial(0, 128):
        for j, k in T.grid(i, 128):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vj = T.axis.S(i, j)
                vk = T.axis.S(128, k)
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic(a: T.handle, b: T.handle, n: T.int32) -> None:
    # Fixture with a symbolic innermost extent n.
    A = T.match_buffer(a, (128, 128, n))
    B = T.match_buffer(b, (128, 128, n))
    for i, j, k in T.grid(128, 128, n):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic_fused(a: T.handle, b: T.handle, n: T.int32) -> None:
    # Expected result of fusing all three loops of elementwise_symbolic:
    # the axes are recovered from the fused index by floordiv/floormod.
    A = T.match_buffer(a, (128, 128, n))
    B = T.match_buffer(b, (128, 128, n))
    for i_j_k_fused in T.serial(0, (n * 16384)):
        with T.block("B"):
            vi = T.axis.S(128, T.floordiv(i_j_k_fused, n * 128))
            vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, n * 128), n))
            vk = T.axis.S(n, T.floormod(i_j_k_fused, n))
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_symbolic_split(a: T.handle, b: T.handle, n: T.int32) -> None:
    # Expected result of splitting the symbolic k loop by 10: a T.where
    # predicate guards the tail when n is not a multiple of 10.
    A = T.match_buffer(a, (128, 128, n))
    B = T.match_buffer(b, (128, 128, n))
    for i, j, k0, k1 in T.grid(128, 128, 10, T.floordiv((n + 9), 10)):
        with T.block("B"):
            T.where((((k0 * T.floordiv((n + 9), 10)) + k1) < n))
            vi, vj = T.axis.remap("SS", [i, j])
            vk = T.axis.S(n, k0 * T.floordiv(n + 9, 10) + k1)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Negative fixture: loop j has TWO child loops (the C and B loops in
# sequence), so fusing (j, k) must fail (see test_fuse_fail_not_only_child).
@T.prim_func
def elementwise_with_seq(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    C = T.alloc_buffer((128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(0, 128):
            with T.block("C"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                C[vi, vj, vk] = A[vi, vj, vk] * 2.0
        for k in T.serial(0, 128):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[vi, vj, vk] = C[vi, vj, vk] * 2.0
# Negative fixture: loop k carries an annotation, which makes both fuse and
# split reject it (see test_fuse_split_fail_with_annotation).
@T.prim_func
def elementwise_with_anno(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(0, 128, annotations={"useless_annotation": True}):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Negative fixture: loop k is bound to threadIdx.x, so fuse/split on it must
# fail (see test_fuse_split_fail_with_thread_binding).
@T.prim_func
def elementwise_with_thread_binding(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Negative fixture: loop k starts at 10 instead of 0; fuse and split require
# zero-based loops (see test_fuse_split_fail_not_start_with_zero).
# Fix: rejoined the `T.serial(10, 128)` call that the extraction had split
# across two lines with a stray '|' marker.
@T.prim_func
def elementwise_with_starting_point(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(10, 128):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Workload whose compute block is wrapped in an iter-var-free "opaque" block;
# used to check that fuse/split still work one level above an opaque block.
@T.prim_func
def elementwise_with_opaque_block(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("opaque"):
            T.reads([A[i, j, k]])
            T.writes([B[i, j, k]])
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of fusing (i, j, k) of `elementwise`:
# one loop of extent 2097152 (= 128**3) with index reconstruction.
@T.prim_func
def elementwise_fused(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for fused in T.serial(0, 2097152):
        with T.block("B"):
            vi = T.axis.S(128, T.floordiv(fused, 16384))
            vj = T.axis.S(128, T.floordiv(T.floormod(fused, 16384), 128))
            vk = T.axis.S(128, T.floormod(fused, 128))
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of splitting i by [2,1,64], j by [4,32], k by [16,8].
# Note i2 has extent 1, hence the i1*64 + i2*64 + i3 index form.
@T.prim_func
def elementwise_split_case0(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128, 128])
    B = T.match_buffer(b, [128, 128, 128])
    for i1, i2, i3, j1, j2, k1, k2 in T.grid(2, 1, 64, 4, 32, 16, 8):
        with T.block("B"):
            vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3)
            vj = T.axis.S(128, j1 * 32 + j2)
            vk = T.axis.S(128, k1 * 8 + k2)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of splitting each loop by [2, 1, 64] with one inferred
# (None) factor per split (see test_split_with_inferred_factor).
# Fix: rejoined the `T.grid(...)` call that the extraction had split
# mid-identifier ("T.gri" / "d(...)") across two lines.
@T.prim_func
def elementwise_split_case1(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128, 128])
    B = T.match_buffer(b, [128, 128, 128])
    for i1, i2, i3, j1, j2, j3, k1, k2, k3 in T.grid(2, 1, 64, 2, 1, 64, 2, 1, 64):
        with T.block("B"):
            vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3)
            vj = T.axis.S(128, j1 * 64 + j2 * 64 + j3)
            vk = T.axis.S(128, k1 * 64 + k2 * 64 + k3)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of splits whose factor products exceed the extents
# (1000*2*3, 129, 3*43 vs 128): a T.where predicate masks out-of-range iters.
@T.prim_func
def elementwise_split_with_predicate(a: T.handle, b: T.handle) -> None:
    B = T.match_buffer(b, [128, 128, 128])
    A = T.match_buffer(a, [128, 128, 128])
    for i0, i1, i2, j0, j1, k0, k1 in T.grid(1000, 2, 3, 1, 129, 3, 43):
        with T.block("B"):
            vi = T.axis.S(128, i0 * 6 + i1 * 3 + i2)
            vj = T.axis.S(128, j0 * 129 + j1)
            vk = T.axis.S(128, k0 * 43 + k1)
            T.where((i0 * 2 + i1) * 3 + i2 < 128 and j0 * 129 + j1 < 128 and k0 * 43 + k1 < 128)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of fusing the loops of `elementwise_with_opaque_block`:
# the opaque block's read/write regions are rewritten in terms of the fused var.
# Fix: rejoined the `vk = T.axis.S(128, ...)` assignment that the extraction
# had split across two lines with a stray '|' marker.
@T.prim_func
def elementwise_fuse_with_opaque_block(a: T.handle, b: T.handle) -> None:
    B = T.match_buffer(b, [128, 128, 128])
    A = T.match_buffer(a, [128, 128, 128])
    for i_j_k_fused in T.serial(0, 2097152):
        with T.block("opaque"):
            T.reads(
                [
                    A[
                        T.floordiv(i_j_k_fused, 16384),
                        T.floordiv(T.floormod(i_j_k_fused, 16384), 128),
                        T.floormod(i_j_k_fused, 128),
                    ]
                ]
            )
            T.writes(
                [
                    B[
                        T.floordiv(i_j_k_fused, 16384),
                        T.floordiv(T.floormod(i_j_k_fused, 16384), 128),
                        T.floormod(i_j_k_fused, 128),
                    ]
                ]
            )
            with T.block("B"):
                vi = T.axis.S(128, T.floordiv(i_j_k_fused, 16384))
                vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, 16384), 128))
                vk = T.axis.S(128, T.floormod(i_j_k_fused, 128))
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Expected result of splitting loop i of `elementwise_with_opaque_block`
# by [None, 16]: regions of the opaque block use the recombined index.
@T.prim_func
def elementwise_split_with_opaque_block(a: T.handle, b: T.handle) -> None:
    B = T.match_buffer(b, [128, 128, 128])
    A = T.match_buffer(a, [128, 128, 128])
    for i0, i1, j, k in T.grid(8, 16, 128, 128):
        with T.block("opaque"):
            T.reads([A[i0 * 16 + i1, j, k]])
            T.writes([B[i0 * 16 + i1, j, k]])
            with T.block("B"):
                vi = T.axis.S(128, i0 * 16 + i1)
                vj, vk = T.axis.remap("SS", [j, k])
                T.reads([A[vi, vj, vk]])
                T.writes([B[vi, vj, vk]])
                B[vi, vj, vk] = A[vi, vj, vk] * 2.0
# Workload with opaque (full-buffer) write regions: block A writes all of A,
# block B fills B through the tvm_fill_fragment intrinsic.
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16], "float32")
    B = T.match_buffer(b, [16, 16], "float32")
    for i, j in T.grid(16, 16):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for i, j in T.grid(16, 16):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
# Expected result of fusing (i, j) for both blocks of `opaque_access`.
# Fix: rejoined the `T.writes([B[0:16, 0:16]])` call that the extraction had
# split across two lines with a stray '|' marker.
@T.prim_func
def opaque_access_fused(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16])
    B = T.match_buffer(b, [16, 16])
    for i_j_fused in T.serial(0, 256):
        with T.block("A"):
            vi = T.axis.S(16, T.floordiv(i_j_fused, 16))
            vj = T.axis.S(16, T.floormod(i_j_fused, 16))
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for i_j_fused in T.serial(0, 256):
        with T.block("B"):
            vi = T.axis.S(16, T.floordiv(i_j_fused, 16))
            vj = T.axis.S(16, T.floormod(i_j_fused, 16))
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle"))
# Expected result of splitting loop j by [None, 4] for both blocks of
# `opaque_access`.
@T.prim_func
def opaque_access_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    B = T.match_buffer(b, (16, 16))
    for i, j0, j1 in T.grid(16, 4, 4):
        with T.block("A"):
            vi = T.axis.S(16, i)
            vj = T.axis.S(16, j0 * 4 + j1)
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for i, j0, j1 in T.grid(16, 4, 4):
        with T.block("B"):
            vi = T.axis.S(16, i)
            vj = T.axis.S(16, j0 * 4 + j1)
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle"))
# Non-affine fixture: 127 rows tiled by 32, so the inner extent uses
# T.min(31, 126 - i * 32) + 1 to clip the last partial tile.
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (127, 128))
    B = T.match_buffer(b, (127, 128))
    for i in T.serial(0, 4):
        for j, k in T.grid(T.min(31, 126 - i * 32) + 1, 128):
            with T.block("B"):
                vi = T.axis.S(127, i * 32 + j)
                vj = T.axis.S(128, k)
                B[vi, vj] = A[vi, vj]
# Expected result of fusing (j, k) of `elementwise_not_affine`: the fused
# extent is the (min-clipped) product T.min(31, 126 - i*32) * 128 + 128.
@T.prim_func
def elementwise_not_affine_fused(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [127, 128])
    B = T.match_buffer(b, [127, 128])
    for i in T.grid(4):
        for j_k_fused in T.serial(0, T.min(31, 126 - i * 32) * 128 + 128):
            with T.block("B"):
                vi = T.axis.S(
                    127,
                    i * 32 + T.floordiv(j_k_fused, 128),
                )
                vj = T.axis.S(128, T.floormod(j_k_fused, 128))
                T.reads([A[vi, vj]])
                T.writes([B[vi, vj]])
                B[vi, vj] = A[vi, vj]
def test_fuse():
    """Fusing (i, j, k) of `elementwise` yields `elementwise_fused`."""
    # Fix: rejoined the `verify_trace_roundtrip` call that the extraction had
    # split mid-identifier across two lines.
    sch = tir.Schedule(elementwise, debug_mask="all")
    block_b = sch.get_block("B")
    i, j, k = sch.get_loops(block_b)
    sch.fuse(i, j, k)
    tvm.ir.assert_structural_equal(elementwise_fused, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_split():
    """Splitting i/j/k of `elementwise` yields `elementwise_split_case0`."""
    schedule = tir.Schedule(elementwise, debug_mask="all")
    loop_i, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    for loop, factors in (
        (loop_i, [2, 1, 64]),
        (loop_j, [4, 32]),
        (loop_k, [16, 8]),
    ):
        schedule.split(loop, factors=factors)
    tvm.ir.assert_structural_equal(elementwise_split_case0, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise)
def test_split_with_inferred_factor():
    """A single `None` factor per split is inferred from the loop extent."""
    schedule = tir.Schedule(elementwise, debug_mask="all")
    loop_i, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    schedule.split(loop_i, factors=[None, 1, 64])
    schedule.split(loop_j, factors=[2, None, 64])
    schedule.split(loop_k, factors=[2, 1, None])
    tvm.ir.assert_structural_equal(elementwise_split_case1, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise)
def test_split_with_predicate():
    """Splits whose factor product exceeds the extent add a guard predicate."""
    schedule = tir.Schedule(elementwise, debug_mask="all")
    loop_i, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    for loop, factors in (
        (loop_i, [1000, 2, 3]),
        (loop_j, [None, 129]),
        (loop_k, [3, None]),
    ):
        schedule.split(loop, factors=factors)
    tvm.ir.assert_structural_equal(elementwise_split_with_predicate, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise)
def test_fuse_fail_not_only_child():
    """Fuse must fail when the outer loop has more than one child loop."""
    schedule = tir.Schedule(elementwise_with_seq, debug_mask="all")
    loops = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.fuse(loops[1], loops[2])
def test_fuse_split_fail_with_annotation():
    """Neither fuse nor split may touch a loop carrying an annotation."""
    schedule = tir.Schedule(elementwise_with_anno, debug_mask="all")
    _, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.fuse(loop_j, loop_k)
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.split(loop_k, factors=[None, 10])
def test_fuse_split_fail_not_start_with_zero():
    """Fuse/split must reject a loop whose min is not zero."""
    # Fixes: (1) rejoined `ScheduleError`, which the extraction had split
    # mid-identifier; (2) use `elementwise_with_starting_point` — the original
    # reused `elementwise_with_anno`, duplicating the previous test and leaving
    # the non-zero-start fixture (loop k starting at 10) untested.
    sch = tir.Schedule(elementwise_with_starting_point, debug_mask="all")
    block_b = sch.get_block("B")
    _, j, k = sch.get_loops(block_b)
    with pytest.raises(tvm.tir.ScheduleError):
        sch.fuse(j, k)
    with pytest.raises(tvm.tir.ScheduleError):
        sch.split(k, factors=[None, 10])
def test_fuse_with_opaque_block():
    """Fusing loops above an opaque block rewrites its access regions."""
    schedule = tir.Schedule(elementwise_with_opaque_block, debug_mask="all")
    loops = schedule.get_loops(schedule.get_block("opaque"))
    schedule.fuse(*loops)
    tvm.ir.assert_structural_equal(elementwise_fuse_with_opaque_block, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise_with_opaque_block)
def test_fuse_with_opaque_access():
    """Fuse works on blocks whose write regions cover the whole buffer."""
    schedule = tir.Schedule(opaque_access, debug_mask="all")
    for block_name in ("A", "B"):
        loops = schedule.get_loops(schedule.get_block(block_name))
        schedule.fuse(*loops)
    tvm.ir.assert_structural_equal(opaque_access_fused, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=opaque_access)
def test_split_with_opaque_block():
    """Splitting the loop directly above an opaque block is allowed."""
    schedule = tir.Schedule(elementwise_with_opaque_block, debug_mask="all")
    outer = schedule.get_loops(schedule.get_block("opaque"))[0]
    schedule.split(outer, factors=[None, 16])
    tvm.ir.assert_structural_equal(elementwise_split_with_opaque_block, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise_with_opaque_block)
def test_split_with_opaque_access():
    """Split works on blocks whose write regions cover the whole buffer."""
    schedule = tir.Schedule(opaque_access, debug_mask="all")
    for block_name in ("A", "B"):
        inner = schedule.get_loops(schedule.get_block(block_name))[1]
        schedule.split(inner, factors=[None, 4])
    tvm.ir.assert_structural_equal(opaque_access_split, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=opaque_access)
def test_split_with_non_positive_factors():
    """Zero or negative split factors must be rejected for every loop."""
    schedule = tir.Schedule(elementwise, debug_mask="all")
    loop_i, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    for loop, bad_factors in (
        (loop_i, [-2, -64]),
        (loop_j, [0, None]),
        (loop_k, [None, -16]),
    ):
        with pytest.raises(tvm.tir.ScheduleError):
            schedule.split(loop, factors=bad_factors)
def test_fuse_split_fail_with_thread_binding():
    """Neither fuse nor split may touch a thread-bound loop."""
    schedule = tir.Schedule(elementwise_with_thread_binding, debug_mask="all")
    _, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.fuse(loop_j, loop_k)
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.split(loop_k, factors=[None, 10])
def test_fuse_symbolic():
    """Fusing the loops of the symbolic workload gives an n*16384 loop."""
    schedule = tir.Schedule(elementwise_symbolic, debug_mask="all")
    loops = schedule.get_loops(schedule.get_block("B"))
    schedule.fuse(*loops)
    tvm.ir.assert_structural_equal(elementwise_symbolic_fused, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise_symbolic)
def test_split_symbolic():
    """Splitting the symbolic-extent loop by [10, None] emits a predicate."""
    schedule = tir.Schedule(elementwise_symbolic, debug_mask="all")
    loops = schedule.get_loops(schedule.get_block("B"))
    schedule.split(loops[-1], factors=[10, None])
    tvm.ir.assert_structural_equal(elementwise_symbolic_split, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise_symbolic)
def test_fuse_fail_with_dependent_loops():
    """Fuse must reject loops whose extent depends on an outer loop var."""
    schedule = tir.Schedule(elementwise_dependent_loops, debug_mask="all")
    loop_i, loop_j, _ = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        schedule.fuse(loop_i, loop_j)
def test_fuse_not_affine():
    """Fusing loops with a min()-clipped extent (non-affine case) works."""
    schedule = tir.Schedule(elementwise_not_affine, debug_mask="all")
    _, loop_j, loop_k = schedule.get_loops(schedule.get_block("B"))
    schedule.fuse(loop_j, loop_k)
    tvm.ir.assert_structural_equal(elementwise_not_affine_fused, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=elementwise_not_affine)
def test_add_unit_loop_above_block():
    """add_unit_loop on a block wraps it in a trivial extent-1 loop."""
    # Fix: rejoined the `T.Buffer[(), "int32"]` annotation that the extraction
    # had split across two lines with a stray '|' marker.

    @T.prim_func
    def zero_dim(
        A: T.Buffer[(), "int32"],
        B: T.Buffer[(), "int32"],
        C: T.Buffer[(), "int32"],
    ) -> None:
        with T.block("C"):
            vi = T.axis.spatial(1, 0)
            C[()] = A[()] + B[()]

    @T.prim_func
    def zero_dim_added(
        A: T.Buffer[(), "int32"],
        B: T.Buffer[(), "int32"],
        C: T.Buffer[(), "int32"],
    ) -> None:
        for u in range(1):
            with T.block("C"):
                vi = T.axis.spatial(1, 0)
                C[()] = A[()] + B[()]

    sch = tir.Schedule(zero_dim, debug_mask="all")
    block = sch.get_block("C")
    sch.add_unit_loop(block)
    tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"])
def test_add_unit_loop_above_loop():
    # add_unit_loop on an existing unit loop stacks another unit loop above it.
    @T.prim_func
    def zero_dim(
        A: T.Buffer[(), "int32"],
        B: T.Buffer[(), "int32"],
        C: T.Buffer[(), "int32"],
    ) -> None:
        for u in range(1):
            with T.block("C"):
                vi = T.axis.spatial(1, 0)
                C[()] = A[()] + B[()]
    @T.prim_func
    def zero_dim_added(
        A: T.Buffer[(), "int32"],
        B: T.Buffer[(), "int32"],
        C: T.Buffer[(), "int32"],
    ) -> None:
        for u1, u2 in T.grid(1, 1):
            with T.block("C"):
                vi = T.axis.spatial(1, 0)
                C[()] = A[()] + B[()]
    sch = tir.Schedule(zero_dim, debug_mask="all")
    block = sch.get_block("C")
    (loop,) = sch.get_loops(block)
    sch.add_unit_loop(loop)
    tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"])
# Fusing loops whose extents mix int32 and int64; currently skipped pending
# a fix in the affine analysis (see the skip reason).
@pytest.mark.skip("Pending fix in affine analysis")
def test_fuse_int64():
    def _create_prim_func():
        n = te.const(16, "int32")
        m = te.const(32, "int64")
        A = te.placeholder((n, m), name="A", dtype="int32")
        B = te.compute((n, m), lambda i, j: A[i, j] + 1, name="B")
        return te.create_prim_func([A, B])
    mod = _create_prim_func()
    sch = tir.Schedule(mod, debug_mask="all")
    i, j = sch.get_loops(sch.get_block("B"))
    sch.fuse(i, j)
    verify_trace_roundtrip(sch=sch, mod=mod)
def test_split_int64_extent_with_mixed_factors():
    """Splitting an int64-extent loop with mixed int64/int32 factors works."""
    # Fix: rejoined `te.create_prim_func`, which the extraction had split
    # mid-identifier across two lines.

    def _create_prim_func():
        m = te.const(384, "int64")
        A = te.placeholder((m,), name="A", dtype="float32")
        B = te.compute((m,), lambda i: A[i] + 1, name="B")
        return te.create_prim_func([A, B])

    mod = _create_prim_func()
    sch = tir.Schedule(mod, debug_mask="all")
    (i,) = sch.get_loops(sch.get_block("B"))
    sch.split(
        i,
        factors=[
            te.const(1, "int64"),
            te.const(512, "int32"),
        ],
    )
def test_split_int64_extent_with_int32_factors():
    """Splitting an int64-extent loop by all-int32 factors must be accepted."""

    def _make_workload():
        extent = te.const(12, "int64")
        inp = te.placeholder((extent,), name="A", dtype="float32")
        out = te.compute((extent,), lambda i: inp[i] + 1, name="B")
        return te.create_prim_func([inp, out])

    schedule = tir.Schedule(_make_workload(), debug_mask="all")
    (loop,) = schedule.get_loops(schedule.get_block("B"))
    schedule.split(loop, factors=[te.const(f, "int32") for f in (1, 1, 3, 1, 4)])
def test_split_int64_factors():
    """An explicit int64 IntImm factor mixed with an inferred factor works."""
    schedule = tir.Schedule(elementwise_symbolic, debug_mask="all")
    loops = schedule.get_loops(schedule.get_block("B"))
    schedule.split(loops[-1], factors=[IntImm(dtype="int64", value=10), None])
    tvm.ir.assert_structural_equal(elementwise_symbolic_split, schedule.mod["main"])
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
# Imports for the ScheduleState replace tests. The original extraction left
# row-separator '|' markers and split "from X import Y" statements across
# lines; reconstructed canonically here.
import gc
import sys

import pytest

import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
# Two-stage elementwise workload (B = A*2, C = B+1) used to build
# ScheduleStates whose root block body has exactly two children.
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Matmul with a separate init block; its update block serves as the
# replacement source in test_replace_block_remap.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Nested blocks under opaque blocks mixed with if/else control flow; used to
# test sref handling when replacing statements inside opaque regions.
# Fix: rejoined the `vj = T.axis.S(128, j)` binding that the extraction had
# split mid-identifier ("T.axis." / "S(128, j)") across two lines.
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.match_buffer(b, (128, 128), "float32")
    for i in range(128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            T.reads([A[0:128, 0:128]])
            T.writes([B[0:128, 0:128]])
            B[vi, 0] = A[vi, 0]
            if A[vi, 0] == 0.0:
                with T.block("C"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("D"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 3.0
            else:
                with T.block("E"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("F"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 2.0
def replace_ir_builder(deep_copy=False, realize=False):
    # Build a fresh ScheduleState over `elementwise` plus a replacement target:
    # an iter-var-free block wrapping the second child of the root block body.
    # deep_copy: round-trip the target through its pickle state so it shares
    #   no nodes with the state (forces copy-on-write paths in s.replace).
    # realize: wrap the target in a BlockRealize, matching srefs of realized blocks.
    new_func = tvm.script.from_source(elementwise.script())
    s = tir.ScheduleState(new_func, debug_mask="all")
    target = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=s.mod["main"].body.block.body[1],
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        target = tvm.tir.BlockRealize(
            iter_values=[],
            predicate=True,
            block=target,
        )
    if deep_copy:
        # __getstate__/__setstate__ round-trip deep-copies the TVM object graph.
        target.__setstate__(target.__getstate__())
    # Drop any dangling references before the test measures object identity.
    gc.collect()
    return s, target
def replace_ir_builder_module(deep_copy=False, realize=False):
    # Same as replace_ir_builder, but the state wraps an IRModule with TWO
    # functions ("main" and "other") to verify replace only touches "main".
    new_func = tvm.script.from_source(elementwise.script())
    other_func = tvm.script.from_source(elementwise.script())
    mod = IRModule(functions={"main": new_func, "other": other_func})
    s = tir.ScheduleState(mod, debug_mask="all")
    target = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=s.mod["main"].body.block.body[1],
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        target = tvm.tir.BlockRealize(
            iter_values=[],
            predicate=True,
            block=target,
        )
    if deep_copy:
        # __getstate__/__setstate__ round-trip deep-copies the TVM object graph.
        target.__setstate__(target.__getstate__())
    gc.collect()
    return s, target
def replace_ir_builder_with_opaque():
    """Build a fresh ScheduleState over `block_in_opaque_block`."""
    reparsed = tvm.script.from_source(block_in_opaque_block.script())
    state = tir.ScheduleState(reparsed, debug_mask="all")
    gc.collect()
    return state
def test_replace_direct_write0():
    # Replacing a uniquely-referenced subtree happens in place: the PrimFunc
    # object is reused (hash unchanged) and the sref stays valid.
    s, target = replace_ir_builder(realize=True)
    old_hash = s.mod["main"].__hash__()
    sref = s.get_sref(s.mod["main"].body.block.body[1])
    s.replace(sref, target)
    assert old_hash == s.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[1], target)
    assert sref.stmt is not None
def test_replace_direct_write1():
    # In-place replace of one child of a SeqStmt: the SeqStmt keeps its hash,
    # while the held reference to the old child no longer matches the target.
    s, target = replace_ir_builder(realize=True)
    old_hash = s.mod["main"].body.block.body.__hash__()
    hold_ref = s.mod["main"].body.block.body[1]
    sref = s.get_sref(s.mod["main"].body.block.body[1])
    s.replace(sref, target)
    assert old_hash == s.mod["main"].body.block.body.__hash__()
    assert not tvm.ir.structural_equal(hold_ref.body, target)
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[1], target)
    assert sref.stmt is not None
def test_replace_copy():
    # With an external reference held on the whole function, replace must
    # copy-on-write: the state gets a new function, the old one is untouched,
    # and the sref into the replaced subtree is invalidated.
    s, target = replace_ir_builder(deep_copy=True, realize=True)
    old_hash = s.mod["main"].__hash__()
    old_func = s.mod["main"]
    sref = s.get_sref(s.mod["main"].body.block.body[0])
    s.replace(sref, target)
    assert old_hash != s.mod["main"].__hash__()
    assert not tvm.ir.structural_equal(old_func.body, s.mod["main"].body)
    assert old_hash == old_func.__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0], target)
    assert sref.stmt is None
def test_replace_partial_copy0():
    # Copy-on-write stops at the externally-referenced node: the held child is
    # copied (new hash), but the enclosing function and the sibling subtree
    # are reused as-is.
    s, target = replace_ir_builder(deep_copy=True, realize=True)
    func_old_hash = s.mod["main"].__hash__()
    hold_ref = s.mod["main"].body.block.body[0]
    ref_old_hash = hold_ref.__hash__()
    sref = s.get_sref(s.mod["main"].body.block.body[0].body)
    other_part_hash = s.mod["main"].body.block.body[1].__hash__()
    s.replace(sref, target)
    assert ref_old_hash != s.mod["main"].body.block.body[0].__hash__()
    assert not tvm.ir.structural_equal(hold_ref.body, target)
    assert func_old_hash == s.mod["main"].__hash__()
    assert other_part_hash == s.mod["main"].body.block.body[1].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0].body, target)
    assert sref.stmt is None
def test_replace_partial_copy1():
    # Holding a reference deeper in the tree: the node ABOVE the held one
    # keeps its hash, the held subtree's content diverges from the target,
    # and the sibling subtree and function are reused.
    s, target = replace_ir_builder(deep_copy=True)
    func_old_hash = s.mod["main"].__hash__()
    hold_ref = s.mod["main"].body.block.body[0].body
    stmt_old_hash = s.mod["main"].body.block.body[0].__hash__()
    sref = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
    other_part_hash = s.mod["main"].body.block.body[1].__hash__()
    s.replace(sref, target)
    assert stmt_old_hash == s.mod["main"].body.block.body[0].__hash__()
    assert not tvm.ir.structural_equal(hold_ref.body, target)
    assert func_old_hash == s.mod["main"].__hash__()
    assert other_part_hash == s.mod["main"].body.block.body[1].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0].body.body.block, target)
    assert sref.stmt is None
def test_replace_root_write():
    # Replacing the root block with no external references reuses the
    # function object in place (hash unchanged).
    s, target = replace_ir_builder()
    old_hash = s.mod["main"].__hash__()
    sref = s.get_sref(s.mod["main"].body.block)
    s.replace(sref, target)
    assert old_hash == s.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
def test_replace_root_copy0():
    # Replacing the root while the whole function is referenced externally
    # copies the function; the held reference keeps the old hash and body.
    s, target = replace_ir_builder(deep_copy=True)
    old_hash = s.mod["main"].__hash__()
    func_ref = s.mod["main"]
    sref = s.get_sref(s.mod["main"].body.block)
    s.replace(sref, target)
    assert old_hash != s.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
    assert old_hash == func_ref.__hash__()
    assert not tvm.ir.structural_equal(func_ref.body, target)
def test_replace_root_copy1():
    # Holding the root BLOCK (not the function) while replacing one of its
    # children: the root block is copied, the held block keeps its old state.
    s, target = replace_ir_builder(deep_copy=True, realize=True)
    old_hash = s.mod["main"].body.block.__hash__()
    func_ref = s.mod["main"].body.block
    sref = s.get_sref(s.mod["main"].body.block.body[0])
    s.replace(sref, target)
    assert old_hash != s.mod["main"].body.block.__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block.body[0], target)
    assert old_hash == func_ref.__hash__()
    assert not tvm.ir.structural_equal(func_ref.body, target)
def test_replace_root_copy2():
    # Holding the module's function map while replacing the root block: the
    # map is copied, the held map keeps its hash and its functions untouched.
    # Fix: rejoined the function name, which the extraction had split across
    # two lines ("def test_replace_root_copy" / "2():").
    s, target = replace_ir_builder(deep_copy=True)
    old_hash = s.mod.functions.__hash__()
    func_ref = s.mod.functions
    sref = s.get_sref(s.mod["main"].body.block)
    s.replace(sref, target)
    assert old_hash != s.mod.functions.__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
    assert old_hash == func_ref.__hash__()
    for _, v in func_ref.items():
        assert not tvm.ir.structural_equal(v.body.block, target)
def test_replace_root_copy3():
    # Holding the whole IRModule while replacing the root block: the module is
    # copied, the held module keeps its hash and its "main" untouched.
    s, target = replace_ir_builder(deep_copy=True)
    old_hash = s.mod.__hash__()
    func_ref = s.mod
    sref = s.get_sref(s.mod["main"].body.block)
    s.replace(sref, target)
    assert old_hash != s.mod.__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
    assert old_hash == func_ref.__hash__()
    assert not tvm.ir.structural_equal(func_ref["main"].body.block, target)
def test_replace_block_remap():
    # Replacing block B with matmul's init block while remapping the old sref
    # to the new block: the sref object survives and now points at the target.
    func = elementwise
    s = tir.ScheduleState(func, debug_mask="all")
    target = matmul.body.block.body.body.body[0].block
    sref = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
    s.replace(sref, target, {sref.stmt: target})
    sref_new = s.get_sref(s.mod["main"].body.block.body[0].body.body.block)
    assert sref.__hash__() == sref_new.__hash__()
    tvm.ir.assert_structural_equal(sref.stmt, target)
def test_replace_block_in_opaque_block():
    # Replacing a For nested inside an opaque block happens in place
    # (function hash unchanged) and the sref tracks the new loop.
    s = replace_ir_builder_with_opaque()
    root_hash = s.mod["main"].__hash__()
    for_loop = s.mod["main"].body.block.body.body.block.body[1].then_case.block.body
    sref = s.get_sref(for_loop)
    new_for_loop = tir.For(
        loop_var=for_loop.loop_var,
        min_val=0,
        extent=128,
        kind=tir.ForKind.SERIAL,
        body=tir.Evaluate(0),
        thread_binding=None,
        annotations=None,
    )
    s.replace(sref, new_for_loop)
    assert root_hash == s.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(sref.stmt, new_for_loop)
def test_replace_ir_module():
    # In a multi-function module, replacing the root of "main" copies only
    # "main": the "other" function keeps its hash.
    s, target = replace_ir_builder_module(deep_copy=True)
    old_hash = s.mod["main"].__hash__()
    other_func_hash = s.mod["other"].__hash__()
    func_ref = s.mod["main"]
    sref = s.get_sref(s.mod["main"].body.block)
    s.replace(sref, target)
    assert old_hash != s.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(s.mod["main"].body.block, target)
    assert old_hash == func_ref.__hash__()
    assert not tvm.ir.structural_equal(func_ref.body, target)
    assert other_func_hash == s.mod["other"].__hash__()
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
# Import block of the next concatenated test file; the extraction left '|'
# markers and split "from X import Y" statements. Reconstructed canonically.
# (The following "from tvm.script ..." line is cut off mid-statement and is
# left untouched.)
import sys

import pytest

import tvm
import tvm.testing
from tvm import tir
from tvm.script |
# NOTE(review): the three lines that followed here ("Subsets and Splits",
# "No community queries yet", "The top public SQL queries from the community
# will appear here once available.") are dataset-viewer UI residue captured
# during extraction, not part of the original source file.