class TestPaddedTransformOfInputCreatesAssumption(BasePaddingCompare):
    """Transformation of an input buffer places T.assume locally"""

    pad_value = tvm.testing.parameter(42)

    def before(A: T.Buffer[14, "int32"], B: T.Buffer[14, "int32"]):
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer[(4, 4), "int32"], B: T.Buffer[14, "int32"]):
        for i, j in T.grid(4, 4):
            with T.block("buffer_A_assumption"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(T.assume(not (vi == 3 and 2 <= vj) or A[vi, vj] == 42))

        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestPaddedTransformNonConstantValue(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformIfThenElse, but the pad value depends on
    the indices.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)
            sch.transform_layout(
                "block",
                "B",
                lambda i: [i // 4, i % 4],
                pad_value=lambda i, j: i + j,
            )
            return sch.mod

        return transform

    def before(A: T.Buffer[14, "int32"]):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer[14, "int32"]):
        B = T.alloc_buffer([4, 4], "int32")
        for i, j in T.grid(4, 4):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = T.if_then_else(
                    vi == 3 and 2 <= vj, vi + vj, A[vi * 4 + vj], dtype="int32"
                )


@pytest.mark.xfail(reason="Not yet implemented")
class TestPaddedTransformRepeatedBufferElement(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformOfInputCreatesAssumption, but the pad value
    depends on another portion of the buffer.  In this case, the padding
    at the end of A contains repeated elements from the beginning of A.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            A = sch.get(sch.get_block("block")).reads[0].buffer
            sch.transform_layout(
                "block",
                "A",
                lambda i: [i // 4, i % 4],
                pad_value=lambda i, j: A[(4 * i + j) % 14],
            )
            return sch.mod

        return transform

    def before(A: T.Buffer[14, "int32"]):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer[(4, 4), "int32"]):
        for i, j in T.grid(4, 4):
            with T.block("buffer_A_assumption"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(
                    T.assume(
                        not (vi == 3 and 2 <= vj)
                        or A[vi, vj] == A[((4 * vi + vj) % 14) // 4, ((4 * vi + vj) % 14) % 4]
                    )
                )

        B = T.alloc_buffer(14, "int32")
        for i in T.grid(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestPadValueMayNotReferenceOtherBuffer(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformRepeatedBufferElement, but the pad value
    depends on a different buffer, which is not allowed.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            A = sch.get(sch.get_block("block")).reads[0].buffer
            other = tir.decl_buffer(1, A.dtype, name="other")
            sch.transform_layout(
                "block",
                "A",
                lambda i: [i // 4, i % 4],
                pad_value=lambda i, j: other[0],
            )
            return sch.mod

        return transform

    def before(A: T.Buffer[14, "int32"]):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    expected = tvm.tir.schedule.schedule.ScheduleError
class TestTransformLayoutWithVar(tvm.testing.CompareBeforeAfter):
    """Layout transform with dynamic parameter in transform"""

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            n = sch.mod["main"].params[1]

            sch.transform_layout(
                "block",
                "B",
                lambda i: [i // n, i % n],
                pad_value=0,
            )
            return sch.mod

        return transform

    def before(A: T.Buffer[16, "int32"], n: T.int32):
        B = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer[16, "int32"], n: T.int32):
        B = T.alloc_buffer([(-16 % n + 16) // n, n], "int32")
        for i, j in T.grid((-16 % n + 16) // n, n):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = T.if_then_else(
                    -16 % n != 0
                    and (
                        (vj + vi * n) // n == 16 // n
                        and 16 % n <= (vj + vi * n) % n
                    ),
                    0,
                    A[vj + vi * n],
                    dtype="int32",
                )


if __name__ == "__main__":
    tvm.testing.main()
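# --- Illustrative sketch (not part of the test classes above) ---------------
# A minimal, hedged example of the transform_layout + pad_value pattern these
# padding tests exercise, reusing this module's existing `tir` and `T`
# imports.  The names `_pad_example` and `_demo_padded_transform` are
# invented for this sketch.


@T.prim_func
def _pad_example(A: T.Buffer[14, "int32"]):
    B = T.alloc_buffer(14, "int32")
    for i in T.serial(14):
        with T.block("block"):
            vi = T.axis.remap("S", [i])
            B[vi] = A[vi]


def _demo_padded_transform():
    sch = tir.Schedule(_pad_example)
    # 14 elements -> ceil(14/4) x 4 = 4x4; flat indices 14 and 15 are padding.
    sch.transform_layout("block", "B", lambda i: [i // 4, i % 4], pad_value=0)
    return sch.mod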
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
from tvm.tir.schedule.testing
import verify_trace_roundtrip @T.prim_func def matmul(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j in T.grid(128, 128): with T.block("init"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = 0.0 for k in range(0, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_relu(a: T.handle, b: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (1024, 1024)) B = T.match_buffer(b, (1024, 1024)) C = T.alloc_buffer((1024, 1024)) D = T.match_buffer(d, (1024, 1024)) for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] for i, j in T.grid(1024, 1024): with T.block("relu"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = T.max(C[vi, vj], 0.0) @T.prim_func def matmul_relu_ann1(a: T.handle, b: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (1024, 1024)) B = T.match_buffer(b, (1024, 1024)) C = T.alloc_buffer((1024, 1024)) D = T.match_buffer(d, (1024, 1024)) for i in T.serial(0, 1024, annotations={"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}}): for j in T.serial(0, 1024, annotations={"test2": 612, "test3": ["aa", 1]}): for k in T.serial(0, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] for i, j in T.grid(1024, 1024): with T.block("relu"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = T.max(
C[vi, vj], 0.0) @T.prim_func def matmul_relu_ann2(a: T.handle, b: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (1024, 1024)) B = T.match_buffer(b, (1024, 1024)) C = T.alloc_buffer((1024, 1024)) D = T.match_buffer(d, (1024, 1024)) for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 T.block_attr({"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}}) C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] for i, j in T.grid(1024, 1024): with T.block("relu"): vi, vj = T.axis.remap("SS", [i, j]) T.block_attr({"test2": 0.22, "test3": ["aa", 1]}) D[vi, vj] = T.max(C[vi, vj], 0.0) @tvm.script.ir_module class ModuleWithMultipleFuncs: @T.prim_func def vector_add( A: T.Buffer[128, "float32"], B: T.Buffer[128, "float32"], ) -> None: for i in range(128): with T.block("init"): vi = T.axis.remap("S", [i]) B[vi] = A[vi] @T.prim_func def vector_add_2( A: T.Buffer[128, "float32"], B: T.Buffer[128, "float32"], ) -> None: for i in range(128): with T.block("init"): vi = T.axis.remap("S", [i]) B[vi] = A[vi] @T.prim_func def tuple_reduction(data: T.Buffer[(4, 32), "float32"], T_add: T.Buffer[(4,), "float32"]) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) with T.block("root"): T.reads() T.writes() data_red_temp_v0 = T.alloc_buffer([4], dtype="float32") data_red_temp_v1 = T.alloc_buffer([4], dtype="float32") for i0, i1 in T.grid(4, 32): with T.block("data_red_temp"): ax0, k1 = T.axis.remap("SR", [i0, i1]) T.reads(data[ax0, k1]) T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0]) with T.init():
data_red_temp_v0[ax0] = T.float32(0) data_red_temp_v1[ax0] = T.float32(0) v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1] v_data_red_temp_v1: T.float32 = ( data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1] ) data_red_temp_v0[ax0] = v_data_red_temp_v0 data_red_temp_v1[ax0] = v_data_red_temp_v1 for i0 in range(4): with T.block("T_add"): ax0 = T.axis.remap("S", [i0]) T.reads(data_red_temp_v0[ax0], data_red_temp_v1[ax0]) T.writes(T_add[ax0]) T_add[ax0] = data_red_temp_v0[ax0] + data_red_temp_v1[ax0] use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True}) def test_tir_schedule_creation(): sch_1 = tir.Schedule(matmul, debug_mask="all") sch_2 = tir.Schedule(IRModule({"main": matmul}), debug_mask="all") assert sch_1.mod["main"].same_as(sch_2.mod["main"]) assert sch_1.state.mod["main"].same_as(sch_2.state.mod["main"]) def test_tir_schedule_get_block(): sch = tir.Schedule(matmul, debug_mask="all") block_rv = sch.get_block(name="update") block_sref = sch.get_sref(block_rv) block = sch.get(block_rv) assert block.name_hint == "update" assert block_sref.stmt.same_as(block) assert sch.state.get_sref(block).same_as(block_sref) assert block.same_as(matmul.body.block.body.body.body[1].body.block) def test_tir_schedule_work_on(): sch = tir.Schedule(ModuleWithMultipleFuncs, debug_mask="all") with pytest.raises(ValueError, match="does not know which function to be working on"): sch.get_block(name="init") sch.work_on(func_name="vector_add") sch.get_block(name="init") def test_tir_schedule_get_loops(use_block_name): sch = tir.Schedule(matmul, debug_mask="all") block = "update" if use_block_name else sch.get_block(n
ame="update") i, j, k = sch.get_loops(block) assert sch.get(i).loop_var.name == "i" assert sch.get(j).loop_var.name == "j" assert sch.get(k).loop_var.name == "k" def test_tir_schedule_copy_1(use_block_name): sch_1 = tir.Schedule(matmul, debug_mask="all") block_rv = sch_1.get_block(name="update") i, j, k = sch_1.get_loops(block="update" if use_block_name else block_rv) assert sch_1.get(i).loop_var.name == "i" assert sch_1.get(j).loop_var.name == "j" assert sch_1.get(k).loop_var.name == "k" sch_2 = sch_1.copy() assert sch_2.get(block_rv).name_hint == "update" assert sch_2.get(i).loop_var.name == "i" assert sch_2.get(j).loop_var.name == "j" assert sch_2.get(k).loop_var.name == "k" def test_tir_schedule_copy_2(): sch = tir.Schedule(mod=matmul, debug_mask="all") i, j, k = sch.get_loops(sch.get_block("update")) sch_copy = sch.copy() assert not sch.get_sref(i).same_as(sch_copy.get_sref(i)) assert not sch.get_sref(j).same_as(sch_copy.get_sref(j)) assert not sch.get_sref(k).same_as(sch_copy.get_sref(k)) assert sch.get_sref(i).stmt.same_as(sch_copy.get_sref(i).stmt) assert sch.get_sref(j).stmt.same_as(sch_copy.get_sref(j).stmt) assert sch.get_sref(k).stmt.same_as(sch_copy.get_sref(k).stmt) i_0, i_1 = sch.split(i, factors=[None, 64]) j_0, j_1 = sch_copy.split(j, factors=[None, 32]) assert sch.get_sref(i_0).stmt.extent == 2 assert sch.get_sref(i_1).stmt.extent == 64 with pytest.raises(IndexError): sch_copy.get_sref(i_0) with pytest.raises(IndexError): sch_copy.get_sref(i_1) with pytest.raises(IndexError): sch.get_sref(j_0) with pytest.raises(IndexError): sch.get_sref(j_1) assert sch_copy.get_sref(j_0).stmt.extent == 4 assert sch_copy.get_sref(j_1).stmt.extent == 32 verify_trace_roundtrip(sch, mod=matmul) verify_trace_roundtrip(sch_copy, mod=matmul) def test_tir_schedule_remove_rv(): sch = tir.Schedule(matmul, debug_mask
="all") block_rv = sch.get_block(name="update") assert sch.get(block_rv).name_hint == "update" sch.remove_rv(block_rv) with pytest.raises(IndexError): sch.get(block_rv) def test_get_child_blocks(): s = tir.Schedule(matmul, debug_mask="all") init = s.get_block("init") update = s.get_block("update") blocks = s.get_child_blocks(s.get_loops(init)[0]) assert len(blocks) == 2 assert s.get(init) == s.get(blocks[0]) assert s.get(update) == s.get(blocks[1]) root = s.get_block("root") blocks = s.get_child_blocks(root) assert len(blocks) == 2 assert s.get(init) == s.get(blocks[0]) assert s.get(update) == s.get(blocks[1]) def test_get_producers(use_block_name): sch = tir.Schedule(mod=matmul_relu, debug_mask="all") block = "relu" if use_block_name else sch.get_block("relu") (producer,) = sch.get_producers(block) assert tvm.ir.structural_equal( sch.get_sref(producer).stmt, sch.get_sref(sch.get_block("matmul")).stmt, ) verify_trace_roundtrip(sch, mod=matmul_relu) def test_get_producers_multiple_buffer_depdencies(use_block_name): sch = tir.Schedule(mod=tuple_reduction, debug_mask="all") block = "T_add" if use_block_name else sch.get_block("T_add") (producer,) = sch.get_producers(block) assert tvm.ir.structural_equal( sch.get_sref(producer).stmt, sch.get_sref(sch.get_block("data_red_temp")).stmt, ) def test_get_consumers(use_block_name): sch = tir.Schedule(mod=matmul_relu, debug_mask="all") block = "matmul" if use_block_name else sch.get_block("matmul") (consumer,) = sch.get_consumers(block) assert tvm.ir.structural_equal( sch.get_sref(consumer).stmt, sch.get_sref(sch.get_block("relu")).stmt, ) verify_trace_roundtrip(sch, mod=matmul_relu) def test_get_consumers_multiple_buffer_depdencies(use_block_name): sch = tir.Schedule(mod=tuple_reduction, debug_mask="all") block = "data_red_temp" if use_block_name else sch.get_bl
ock("data_red_temp") (consumer,) = sch.get_consumers(block) assert tvm.ir.structural_equal( sch.get_sref(consumer).stmt, sch.get_sref(sch.get_block("T_add")).stmt, ) def test_annotate_unannotate_loop(): sch = tir.Schedule(mod=matmul_relu, debug_mask="all") matmul = sch.get_block("matmul") relu = sch.get_block("relu") sch.annotate(sch.get_loops(matmul)[0], "test1", "aaa") sch.annotate(sch.get_loops(matmul)[1], "test2", 612) sch.annotate(sch.get_loops(matmul)[1], "test3", ["aa", 1]) sch.annotate(sch.get_loops(matmul)[0], "test4", {"arr": [0, 0], "key": 3}) tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann1) verify_trace_roundtrip(sch=sch, mod=matmul_relu) sch.unannotate(sch.get_loops(matmul)[0], "test1") sch.unannotate(sch.get_loops(matmul)[1], "test2") sch.unannotate(sch.get_loops(matmul)[1], "test3") sch.unannotate(sch.get_loops(matmul)[0], "test4") verify_trace_roundtrip(sch=sch, mod=matmul_relu) def test_annotate_unannotate_block(): sch = tir.Schedule(mod=matmul_relu, debug_mask="all") matmul = sch.get_block("matmul") relu = sch.get_block("relu") sch.annotate(matmul, "test1", "aaa") sch.annotate(relu, "test2", 0.22) sch.annotate(relu, "test3", ["aa", 1]) sch.annotate(matmul, "test4", {"arr": [0, 0], "key": 3}) tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann2) verify_trace_roundtrip(sch=sch, mod=matmul_relu) sch.unannotate(matmul, "test1") sch.unannotate(relu, "test2") sch.unannotate(relu, "test3") sch.unannotate(matmul, "test4") verify_trace_roundtrip(sch=sch, mod=matmul_relu) if __name__ == "__main__": tvm.testing.main()
import tvm
from tvm.script
import tir as T @T.prim_func def matmul(a: T.handle, b: T.handle, c: T.handle, n: T.int32) -> None: m = T.var("int32") A = T.match_buffer(a, [m, n]) B = T.match_buffer(b, [m, n]) C = T.match_buffer(c, [m, m]) for i, j, k in T.grid(m, m, n): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_128(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j, k in T.grid(128, 128, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_m_128(a: T.handle, b: T.handle, c: T.handle) -> None: m = T.var("int32") A = T.match_buffer(a, [m, 128]) B = T.match_buffer(b, [m, 128]) C = T.match_buffer(c, [m, m]) for i, j, k in T.grid(m, m, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_m_8x(a: T.handle, b: T.handle, c: T.handle) -> None: x = T.var("int32") m = T.var("int32") A = T.match_buffer(a, [m, x * 8]) B = T.match_buffer(b, [m, x * 8]) C = T.match_buffer(c, [m, m]) for i, j, k in T.grid(m, m, x * 8): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def element_wise(a: T.handle, c: T.handle) -> None: m = T.var("int32") n = T.var("int32") A = T.match_buffer(a, (m, n), "float32") C = T.match_buffer(c, (m, n), "float32")
B = T.alloc_buffer((m, n), "float32") for i, j in T.grid(m, n): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(m, n): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def element_wise_128_64(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 64), "float32") C = T.match_buffer(c, (128, 64), "float32") B = T.alloc_buffer((128, 64), "float32") for i, j in T.grid(128, 64): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 64): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def element_wise_128_n(a: T.handle, c: T.handle) -> None: n = T.var("int32") A = T.match_buffer(a, (128, n), "float32") C = T.match_buffer(c, (128, n), "float32") B = T.alloc_buffer((128, n), "float32") for i, j in T.grid(128, n): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, n): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def mem_copy(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32, q: T.int32) -> None: A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=q) B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=q) for i, j in T.grid(m, n): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] @T.prim_func def mem_copy_16_16_8_4(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32", strides=[8, 1], elem_offset=4) B = T.match_buffer(b, (16, 16), "float32", strides=[8, 1], elem_offset=4) for i, j in T.grid(16, 16): wi
th T.block(): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] @T.prim_func def mem_copy_m_n_p_n(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32) -> None: A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=n) B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=n) for i, j in T.grid(m, n): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] @T.prim_func def param_in_arith_exprs(a: T.handle, b: T.handle) -> None: n = T.var("int32") A = T.match_buffer(a, [n // 8, 8], "int32") B = T.match_buffer(b, [n], "int32") for i in range(n - 1): with T.block(): vi = T.axis.S(n - 1, i) B[vi] = A[vi // 8, vi % 8] + (n + 1) * 42 @T.prim_func def param_in_arith_exprs_n_16(a: T.handle, b: T.handle) -> None: n = T.var("int32") A = T.match_buffer(a, [2, 8], "int32") B = T.match_buffer(b, [16], "int32") for i in range(15): with T.block(): vi = T.axis.S(15, i) B[vi] = A[vi // 8, vi % 8] + 714 def test_specialize_nothing(): func = matmul.specialize({}) assert func.same_as(matmul) def test_specialize_matmul(): a, _, _, n = matmul.params func = matmul.specialize({a: tvm.tir.decl_buffer((128, 128))}) tvm.ir.assert_structural_equal(func, matmul_128) func = matmul.specialize({n: 128}) tvm.ir.assert_structural_equal(func, matmul_m_128) func = matmul.specialize({n: tvm.tir.Var("x", "int32") * 8}) tvm.ir.assert_structural_equal(func, matmul_m_8x) def test_specialize_elemwise(): a, c = element_wise.params C = element_wise.buffer_map[c] func = element_wise.specialize({a: tvm.tir.decl_buffer((128, 64))}) tvm.ir.assert_structural_equal(func, element_wise_128_64) func = element_wise.specialize({c: tvm.tir.decl_buffer((128, C.shape[1]))}) tvm.ir.assert_structural_equal(func, element_wise_128_n) def test_specialize_mem_copy(): a, _, m, n, p, q = mem_copy.params fun
c = mem_copy.specialize({a: tvm.tir.decl_buffer((16, 16), strides=[8, 1], elem_offset=4)}) tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4) func = mem_copy.specialize({n: 16, m: 16, p: 8, q: 4}) tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4) func = mem_copy.specialize({q: n}) tvm.ir.assert_structural_equal(func, mem_copy_m_n_p_n) def test_specialize_recursive_load(): pass def test_specialize_with_const_folding(): b = param_in_arith_exprs.params[1] func = param_in_arith_exprs.specialize({b: tvm.tir.decl_buffer([16])}) tvm.ir.assert_structural_equal(func, param_in_arith_exprs_n_16) if __name__ == "__main__": test_specialize_nothing() test_specialize_matmul() test_specialize_elemwise() test_specialize_mem_copy() test_specialize_recursive_load() test_specialize_with_const_folding()
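# --- Illustrative sketch (not part of the tests above) ----------------------
# A hedged restatement of what PrimFunc.specialize does: binding the symbolic
# extent `n` of `matmul` to a concrete value yields a fully static function
# (structurally equal to matmul_m_128, as test_specialize_matmul checks).
# `_demo_specialize` is a name invented for this sketch.


def _demo_specialize():
    n = matmul.params[3]
    return matmul.specialize({n: 128})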
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_ir_transform(): ib = tvm.tir.ir_builder.create() n = te.var("n") with ib.for_range(0, n, name="i") as i: with ib.for_range(0, 10, name="j") as j: x = tvm.tir.call_extern("int32", "TestA", i * 3 + j * 1) ib.emit(tvm.tir.call_extern("int32", "TestB", x)) ib.emit(tvm.tir.call_extern("int32", "TestC", x)) body = ib.get() builtin_call_extern = tvm.ir.Op.get("tir.call_extern") def preorder(op): if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestC": return tvm.tir.const(0, "int32") return None def postorder(op): assert isinstance(op, tvm.tir.Call) if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestA": return tvm.tir.call_extern("int32", "TestB", op.args[1] + 1) return op body = tvm.tir.stmt_functor.ir_transform(body, preorder, postorder, ["tir.Call"]) stmt_list = tvm.tir.stmt_list(body.body.body) assert stmt_list[0].value.args[1].args[0].value == "TestB" assert stmt_list[1].value.value == 0 if __name__ == "__main__": test_ir_transform()
import tvm
import numpy as np
import pytest
from tvm import te
from tvm.runtime
import ObjectPath def consistent_equal(x, y, map_free_vars=False): struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars) struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars) xhash = tvm.ir.structural_hash(x, map_free_vars) yhash = tvm.ir.structural_hash(y, map_free_vars) if struct_equal0 != struct_equal1: raise ValueError( "Non-commutative {} vs {}, sequal0={}, sequal1={}".format( x, y, struct_equal0, struct_equal1 ) ) if struct_equal0 != (xhash == yhash): raise ValueError( "Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format( x, y, struct_equal0, xhash, yhash ) ) return struct_equal0 def get_sequal_mismatch(x, y, map_free_vars=False): mismatch_0 = tvm.ir.base.get_first_structural_mismatch(x, y, map_free_vars) mismatch_1 = tvm.ir.base.get_first_structural_mismatch(y, x, map_free_vars) if mismatch_0 is None and mismatch_1 is None: return None if ( mismatch_0 is None or mismatch_1 is None or mismatch_0[0] != mismatch_1[1] or mismatch_0[1] != mismatch_1[0] ): raise ValueError( "Non-commutative {} vs {}, mismatch_0={}, mismatch_1={}".format( x, y, mismatch_0, mismatch_1 ) ) return mismatch_0 def test_exprs(): x = tvm.tir.const(1, "int32") y = tvm.tir.const(10, "int32") vx = te.var("x") vy = te.var("y") vz = te.var("z") zx = vx + vx zy = vy + vy assert consistent_equal(zx * zx, (vx + vx) * (vx + vx), map_free_vars=False) with pytest.raises(ValueError): tvm.ir.assert_structural_equal(x, y) assert not consistent_equal(vx, vy) assert consistent_equal(vx, vy, map_free_vars=True) assert not consistent_equal(vx + vx, vy + vx, map_free_vars=True) assert consistent_equal(vx + vy, vy + vx, map_free_vars=True) assert consistent_equal(vx
+ vy + vz, vy + vz + vx, map_free_vars=True) assert not consistent_equal(vx + 1, vy + 1, map_free_vars=False) assert consistent_equal(tvm.tir.Let(vx, 1, vx - 1), tvm.tir.Let(vy, 1, vy - 1)) assert consistent_equal(tvm.tir.Let(vx, 1, vx assert consistent_equal(zx * zx, zx * zx) assert consistent_equal(zx * zx, zy * zy, map_free_vars=True) assert not consistent_equal(zx * zx, zy * zy, map_free_vars=False) def test_prim_func(): x = te.var("x") y = te.var("y") func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x + y)) func1 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(y + x)) assert not consistent_equal(func0, func1) b = tvm.tir.decl_buffer((x,), "float32") stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1)) func0 = tvm.tir.PrimFunc([x, y, b], stmt) func1 = tvm.ir.load_json(tvm.ir.save_json(func0)) tvm.ir.assert_structural_equal(func0, func1) data0 = tvm.nd.array([1, 2, 3]) data1 = tvm.nd.array([1, 2, 3]) func0 = func0.with_attr("data", data0) func1 = func1.with_attr("data", data1) mod0 = tvm.IRModule.from_expr(func0) mod1 = tvm.IRModule.from_expr(func1) tvm.ir.assert_structural_equal(mod0, mod1) def test_prim_func_param_count_mismatch(): x = te.var("x") y = te.var("y") z = te.var("z") func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x)) func1 = tvm.tir.PrimFunc([x, y, z], tvm.tir.Evaluate(x)) lhs_path, rhs_path = get_sequal_mismatch(func0, func1) expected_lhs_path = ObjectPath.root().attr("params").missing_array_element(2) expected_rhs_path = ObjectPath.root().attr("params").array_index(2) assert lhs_path == expected_lhs_path assert rhs_path == expected_rhs_path def test_prim_func_param_dtype_mismatch(): x = te.var("x") y_0 = te.var("y", dtype="int32") y_1 = te.var("z", dtype="float32") func0 = tvm.tir.PrimFunc([x, y_0], tvm.tir.Evaluate(x)) func1 = tvm.tir.PrimFunc([x, y_1], tvm.tir.Evaluate(x)) lhs_path,
rhs_path = get_sequal_mismatch(func0, func1) expected_path = ObjectPath.root().attr("params").array_index(1).attr("dtype") assert lhs_path == expected_path assert rhs_path == expected_path def test_prim_func_body_mismatch(): x_0 = te.var("x") y_0 = te.var("y") x_1 = te.var("x") y_1 = te.var("y") func0 = tvm.tir.PrimFunc([x_0, y_0], tvm.tir.Evaluate(x_0 + x_0)) func1 = tvm.tir.PrimFunc([x_1, y_1], tvm.tir.Evaluate(x_1 + y_1)) lhs_path, rhs_path = get_sequal_mismatch(func0, func1) expected_path = ObjectPath.root().attr("body").attr("value").attr("b") assert lhs_path == expected_path assert rhs_path == expected_path def test_array(): x = np.arange(10) nx = tvm.nd.array(x) ny = tvm.nd.array(x) nz = tvm.nd.array(x.reshape(2, 5)) assert consistent_equal(nx, ny) assert not consistent_equal(nx, nz) def test_env_func(): @tvm.register_func("test.sequal.env_func") def test(x): return x + 1 x = tvm.ir.EnvFunc.get("test.sequal.env_func") y = tvm.ir.EnvFunc.get("test.sequal.env_func") assert consistent_equal(y, x) def test_attrs(): x = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx") y = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx") z = tvm.ir.make_node("attrs.TestAttrs", axis=2, name="xx") tvm.ir.assert_structural_equal(y, x) assert not consistent_equal(y, z) x = tvm.runtime.convert({"x": [1, 2, 3], "y": 2}) y = tvm.runtime.convert({"y": 2, "x": [1, 2, 3]}) z = tvm.runtime.convert({"y": 2, "x": [1, 2, 3, 4]}) assert consistent_equal(y, x) assert not consistent_equal(y, z) def test_stmt(): x = te.var("x") y = te.var("y") n = 128 A = te.placeholder((n, n), name="A") B = te.placeholder((n, n), name="B") ii = te.var("i") jj = te.var("j") Ab = tvm.tir.decl_buffer((n,), name="A") n = te.var("n") def func2(): ib = tvm.tir.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n, name="i"
) as i: A[i] = A[i] + 1 with ib.for_range(0, 10, name="j") as j: A[j] = A[j] + 2 A[j] = A[j] + 2 return ib.get() assert consistent_equal(func2(), func2()) def test_buffer_storage_scope(): x = te.var("x", dtype="handle") buffer_local_0 = tvm.tir.decl_buffer((10, 10), "float32", scope="local") buffer_local_1 = tvm.tir.decl_buffer((10, 10), "float32", scope="local") buffer_global = tvm.tir.decl_buffer((10, 10), "float32") buffer_empty = tvm.tir.decl_buffer((10, 10), "float32", scope="") func0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_0}) func1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_1}) func2 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_global}) func3 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_empty}) assert consistent_equal(func0, func1) assert consistent_equal(func2, func3) assert not consistent_equal(func0, func2) def test_buffer_map_mismatch(): x = te.var("x") buffer_0 = tvm.tir.decl_buffer((10, 10)) buffer_0_clone = tvm.tir.decl_buffer((10, 10)) buffer_1 = tvm.tir.decl_buffer((10, 20)) func_0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0}) func_0_clone = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0_clone}) func_1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_1}) lhs_path, rhs_path = get_sequal_mismatch(func_0, func_1) expected_path = ( ObjectPath.root().attr("buffer_map").map_value(x).attr("shape").array_index(1).attr("value") ) assert lhs_path == expected_path assert rhs_path == expected_path assert get_sequal_mismatch(func_0, func_0_clone) is None def test_buffer_map_length_mismatch(): x = te.var("x") y = te.var("x") buffer_0 = tvm.tir.decl_buffer((10, 10)) buffer_1 = tvm.tir.decl_buffer((10, 20)) func_0 = tvm.tir.PrimFunc([x], t
vm.tir.Evaluate(x), buffer_map={x: buffer_0}) func_1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_0, y: buffer_1}) lhs_path, rhs_path = get_sequal_mismatch(func_0, func_1) expected_lhs_path = ObjectPath.root().attr("buffer_map").missing_map_entry() assert lhs_path == expected_lhs_path expected_rhs_path = ObjectPath.root().attr("buffer_map").map_value(y) assert rhs_path == expected_rhs_path def test_buffer_load_store(): b = tvm.tir.decl_buffer((10, 10), "float32") x = tvm.tir.BufferLoad(b, [0, 1]) y = tvm.tir.BufferLoad(b, [0, 1]) z = tvm.tir.BufferLoad(b, [1, 2]) assert consistent_equal(y, x) assert not consistent_equal(y, z) i = tvm.tir.Var("x", "int32") sx = tvm.tir.BufferStore(b, 0.1, [0, i]) sy = tvm.tir.BufferStore(b, 0.1, [0, i]) sz = tvm.tir.BufferStore(b, 0.1, [1, i]) assert consistent_equal(sy, sx) assert not consistent_equal(sy, sz) def test_while(): x = tvm.tir.Var("x", "int32") y = tvm.tir.Var("y", "int32") wx = tvm.tir.While(x > 0, tvm.tir.Evaluate(x)) wy = tvm.tir.While(y > 0, tvm.tir.Evaluate(y)) assert not consistent_equal(wx, wy) assert consistent_equal(wx, wy, map_free_vars=True) def test_while_condition_mismatch(): x = tvm.tir.Var("x", "int32") w_0 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x)) w_1 = tvm.tir.While(x < 0, tvm.tir.Evaluate(x)) lhs_path, rhs_path = get_sequal_mismatch(w_0, w_1) expected_path = ObjectPath.root().attr("condition") assert lhs_path == expected_path assert rhs_path == expected_path def test_while_body_mismatch(): x = tvm.tir.Var("x", "int32") w_0 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x)) w_1 = tvm.tir.While(x > 0, tvm.tir.Evaluate(x + 1)) lhs_path, rhs_path = get_sequal_mismatch(w_0, w_1) expected_path = ObjectPath.root().attr("body").attr("value") assert lhs_path == expected_path assert rhs_path == expected_path def test_seq_mismatch(): x = tvm.tir.Var("x", "int32") seq_0 = tvm
.tir.SeqStmt( [ tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2), tvm.tir.Evaluate(x + 3), ] ) seq_1 = tvm.tir.SeqStmt( [ tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 99), tvm.tir.Evaluate(x + 3), ] ) lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1) expected_path = ( ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value") ) assert lhs_path == expected_path assert rhs_path == expected_path def test_seq_mismatch_different_lengths(): x = tvm.tir.Var("x", "int32") seq_0 = tvm.tir.SeqStmt( [ tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2), tvm.tir.Evaluate(x + 3), ] ) seq_1 = tvm.tir.SeqStmt([tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 3)]) lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1) expected_path = ( ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value") ) assert lhs_path == expected_path assert rhs_path == expected_path def test_seq_length_mismatch(): x = tvm.tir.Var("x", "int32") seq_0 = tvm.tir.SeqStmt( [ tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2), tvm.tir.Evaluate(x + 3), ] ) seq_1 = tvm.tir.SeqStmt([tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2)]) lhs_path, rhs_path = get_sequal_mismatch(seq_0, seq_1) expected_lhs_path = ObjectPath.root().attr("seq").array_index(3) expected_rhs_path = ObjectPath.root().attr("seq").missing_array_element(3) assert lhs_path == expected_lhs_path assert rhs_path == expected_rhs_path if __name__ == "__main__": test_exprs() test_prim_func() test_attrs() test_array() test_env_func(
) test_stmt() test_buffer_storage_scope() test_buffer_load_store() test_while()
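# --- Illustrative sketch (not part of the tests above) ----------------------
# A hedged example of the helper pattern used throughout this file: comparing
# two IR fragments and asking where they first diverge, reported as a pair of
# ObjectPaths.  `_demo_first_mismatch` is a name invented for this sketch.


def _demo_first_mismatch():
    x = te.var("x")
    func_a = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x + 1))
    func_b = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x + 2))
    # Returns (lhs_path, rhs_path) pointing at the differing constant.
    return tvm.ir.base.get_first_structural_mismatch(func_a, func_b)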
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import tir, te, TVMError
from tvm.script import tir as T
from tvm.arith import _ffi_api as _ffi_arith_api
from tvm.tir.schedule
import _ffi_api as _ffi_schedule_api @T.prim_func def func_1(A: T.Buffer[(16,), "float32"], C: T.Buffer[(1,), "float32"]): for i in T.serial( 0, 16, ): with T.block(): B = T.alloc_buffer((1,), dtype="float32") with T.block(): B[0] = A[i] * T.float32(2) with T.block(): C[0] = C[0] + A[i] + B[0] + T.float32(1) A[i] = B[0] + T.float32(1) def verify_func_1(module): a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32) c_np = np.zeros((1,), dtype=np.float32) a = tvm.nd.array(a_np, device=tvm.cpu(0)) c = tvm.nd.array(c_np, device=tvm.cpu(0)) module(a, c) tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1), c.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np * 2 + 1, a.numpy(), rtol=1e-4) @T.prim_func def func_2( C: T.Buffer[(1,), "float32"], A: T.Buffer[(16,), "float32"], D: T.Buffer[(2,), "float32"] ): for i in T.serial( 0, 16, ): with T.block(): B = T.alloc_buffer((1,), dtype="float32") with T.block(): B[0] = A[i] * T.float32(2) with T.block(): C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0] A[i] = B[0] + T.float32(1) + D[1] def verify_func_2(module): a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32) d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32) c_np = np.zeros((1,), dtype=np.float32) a = tvm.nd.array(a_np, device=tvm.cpu(0)) d = tvm.nd.array(d_np, device=tvm.cpu(0)) c = tvm.nd.array(c_np, device=tvm.cpu(0)) module(c, a, d) tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4) @T.prim_func def func_3( C: T.Buffer[(1,), "float32"], A: T.Buffer[(16,), "float32"], D: T.Buffer[(2,), "float32"], E: T.
Buffer[(16,), "float32"], F: T.Buffer[(16,), "float32"], ): for i in T.serial( 0, 16, ): with T.block(): B = T.alloc_buffer((1,), dtype="float32") with T.block(): B[0] = A[i] * T.float32(2) with T.block(): E[i] = A[i] F[i] = E[i] + 1.0 C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0] A[i] = B[0] + T.float32(1) + D[1] def verify_func_3(module): a_np = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32) d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32) c_np = np.zeros((1,), dtype=np.float32) e_np = np.zeros((16,), dtype=np.float32) f_np = np.zeros((16,), dtype=np.float32) a = tvm.nd.array(a_np, device=tvm.cpu(0)) d = tvm.nd.array(d_np, device=tvm.cpu(0)) c = tvm.nd.array(c_np, device=tvm.cpu(0)) e = tvm.nd.array(e_np, device=tvm.cpu(0)) f = tvm.nd.array(f_np, device=tvm.cpu(0)) module(c, a, d, e, f) tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np, e.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np + 1, f.numpy(), rtol=1e-4) @T.prim_func def func_4( C: T.Buffer[(1,), "float32"], A: T.Buffer[(16,), "float32"], F: T.Buffer[(16,), "float32"], D: T.Buffer[(2,), "float32"], E: T.Buffer[(16,), "float32"], ): for i in T.serial( 0, 16, ): with T.block(): B = T.alloc_buffer((1,), dtype="float32") with T.block(): B[0] = A[i] * T.float32(2) with T.block(): E[i] = A[i] F[i] = E[i] + 1.0 C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0] A[i] = B[0] + T.float32(1) + D[1] def verify_func_4(module): a_np = np.random.randint(low=-128, high=127, size=(16,)).astyp
e(np.float32) d_np = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32) c_np = np.zeros((1,), dtype=np.float32) e_np = np.zeros((16,), dtype=np.float32) f_np = np.zeros((16,), dtype=np.float32) a = tvm.nd.array(a_np, device=tvm.cpu(0)) d = tvm.nd.array(d_np, device=tvm.cpu(0)) c = tvm.nd.array(c_np, device=tvm.cpu(0)) e = tvm.nd.array(e_np, device=tvm.cpu(0)) f = tvm.nd.array(f_np, device=tvm.cpu(0)) module(c, a, f, d, e) tvm.testing.assert_allclose(c_np + np.sum(3 * a_np + 1 + d_np[0]), c.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np * 2 + 1 + d_np[1], a.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np, e.numpy(), rtol=1e-4) tvm.testing.assert_allclose(a_np + 1, f.numpy(), rtol=1e-4) class TestPrimFuncs: func, verify = tvm.testing.parameters( [func_1, verify_func_1], [func_2, verify_func_2], [func_3, verify_func_3], [func_4, verify_func_4], ) def test_primfunc_call(self, func, verify): target = tvm.target.Target("llvm") func = tvm.build(func, target=target) verify(func) def test_te_extern_call(self, func, verify): ir_mod = tvm.IRModule.from_expr(func.with_attr("global_symbol", "main")) prim_func = ir_mod["main"] input_tensors = create_input_tensors_for_primfunc(prim_func) output = te.extern_primfunc(input_tensors, prim_func) rt_prim_func = te.create_prim_func(tensors_from_extern_op(output, prim_func)) tvm.ir.assert_structural_equal(tvm.lower(prim_func), tvm.lower(rt_prim_func)) target = tvm.target.Target("llvm") func = tvm.build(rt_prim_func, target=target) verify(func) def tensors_from_extern_op(extern, func): if isinstance(extern, list): output_tensors = extern else: output_tensors = [extern] output_buffers = [] input_buffers = [] input_tensors = [] for ext in output_tensors: output_buffers.extend(ext.op.output_placeholders)
input_buffers.extend(ext.op.input_placeholders) input_tensors.extend(ext.op.input_tensors) input_binds = dict(zip(input_buffers, input_tensors)) output_binds = dict(zip(output_buffers, output_tensors)) buffer_to_tensor = {**input_binds, **output_binds} ordered_tensors = [] for var in func.params: buf = func.buffer_map[var] ordered_tensors.append(buffer_to_tensor[buf]) return ordered_tensors def create_input_tensors_for_primfunc(primfunc): access_map = {k: tuple(v) for k, v in _ffi_arith_api.DomainTouchedAccessMap(primfunc).items()} in_buffers = [buf for buf, access in access_map.items() if len(access[0])] out_buffers = [buf for buf, access in access_map.items() if len(access[1])] assert in_buffers, "PrimFunc has no input buffers" assert out_buffers, "PrimFunc has no output buffers" outputs = [] inplace = [] inputs = in_buffers for obuf in out_buffers: if obuf in in_buffers: inplace.append(obuf) else: outputs.append(obuf) if not outputs: iobuf = inplace.pop() inputs.remove(iobuf) outputs = [iobuf] def create_tensors(input_buffers): tensors = [] for buf in input_buffers: t = te.placeholder(buf.shape, dtype=buf.dtype, name=buf.name + "_placeholder") tensors.append(t) return tensors return create_tensors(inputs) if __name__ == "__main__": sys.exit(pytest.main(sys.argv))
import pytest
import tvm
import tvm.testing
from tvm.ir.module import IRModule
from tvm import tir
from tvm.script
import tir as T def test_texture_scope(): @tvm.script.ir_module class PlusOneMultTwo: @T.prim_func def main(a: T.handle, b: T.handle) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, (128, 128, 4), dtype="float32", scope="global.texture") B = T.alloc_buffer((128, 128, 4), dtype="float32", scope="global.texture") C = T.match_buffer(b, (128, 128, 4), dtype="float32", scope="global.texture") for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"): for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"): for k in T.serial(4): with T.block("B"): vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k]) B[vb, vt, vk] = A[vb, vt, vk] + T.float32(1) for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"): for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"): for k in T.serial(4): with T.block("C"): vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k]) C[vb, vt, vk] = B[vb, vt, vk] * T.float32(2) sch = tir.Schedule(PlusOneMultTwo, debug_mask="all") def schedule_block(block): _, _, inner = sch.get_loops(block) sch.vectorize(inner) schedule_block(sch.get_block("B")) schedule_block(sch.get_block("C")) target = tvm.target.Target("opencl") mod = tvm.build(sch.mod["main"], target=target) if __name__ == "__main__": tvm.testing.main()
import tvm
from tvm import topi
from tvm
import te def lower_stmt(sche, params, passfunc): func = tvm.driver.build_module.schedule_to_module(sche, params, "main", None)["main"] func = passfunc()(tvm.IRModule.from_expr(func))["main"] stmt = func.body return stmt def test_promote(): def runpass(op, passfunc): a = te.placeholder((100,), dtype="bfloat16") b = te.placeholder((100,), dtype="bfloat16") c = te.compute((100,), lambda i: op(a[i], b[i])) s = te.create_schedule(c.op) return lower_stmt(s, [a, b, c], passfunc) def get_promoted(op): a = te.placeholder((100,), dtype="bfloat16") b = te.placeholder((100,), dtype="bfloat16") c = te.compute( (100,), lambda i: topi.cast(op(topi.cast(a[i], "float"), topi.cast(b[i], "float")), "bfloat16"), ) s = te.create_schedule(c.op) func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"] return func.body def test_promoted(op): stmt = runpass(op, tvm.tir.transform.BF16Promote) tvm.ir.assert_structural_equal(stmt, get_promoted(op)) test_promoted(topi.add) test_promoted(topi.subtract) test_promoted(topi.multiply) test_promoted(topi.divide) def test_eliminate(): def to32(v): return topi.cast(v, "float") def to16(v): return topi.cast(v, "bfloat16") def get_eliminated(): a = te.placeholder((100,), dtype="bfloat16") b = te.placeholder((100,), dtype="bfloat16") c = te.compute( (100,), lambda i: to16( topi.add( to32( to16( topi.add( to32(a[i]), to32(b[i]), ) ) ), to32( to16( topi.add( to32(a[i]),
to32(b[i]), ) ) ), ) ), ) s = te.create_schedule(c.op) stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16CastElimination) return stmt def get_target(): a = te.placeholder((100,), dtype="bfloat16") b = te.placeholder((100,), dtype="bfloat16") c = te.compute( (100,), lambda i: to16( topi.add( topi.add( to32(a[i]), to32(b[i]), ), topi.add( to32(a[i]), to32(b[i]), ), ) ), ) s = te.create_schedule(c.op) func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"] return func.body tvm.ir.assert_structural_equal(get_eliminated(), get_target()) def test_legalize(): def to32(v): uint32_v = topi.cast(v, "uint32") uint32_v = tvm.tir.call_intrin( "uint32", "tir.shift_left", uint32_v, tvm.tir.const(16, "uint32") ) return tvm.tir.call_intrin("float32", "tir.reinterpret", uint32_v) def to16(v): uint32_v = tvm.tir.call_intrin("uint32", "tir.reinterpret", v) rounding_bias = tvm.tir.call_intrin( "uint32", "tir.shift_right", uint32_v, tvm.tir.const(16, "uint32") ) rounding_bias = tvm.tir.call_intrin( "uint32", "tir.bitwise_and", rounding_bias, tvm.tir.const(1, "uint32") ) rounding_bias = rounding_bias + tvm.tir.const(0x7FFF, "uint16") uint32_v = uint32_v + rounding_bias uint32_v = tvm.tir.call_intrin( "uint32", "tir.shift_right", uint32_v, tvm.tir.const(16, "uint32") ) return topi.cast(uint32_v, "uint16") def check(fcompute_before, fcompute_after): a = te.placeholder((100,), dty
pe="bfloat16", name="A") b = te.placeholder((100,), dtype="bfloat16", name="B") c = te.compute((100,), fcompute_before(a, b), name="C") s = te.create_schedule(c.op) stmt = lower_stmt(s, [a, b, c], tvm.tir.transform.BF16Legalize) a = te.placeholder((100,), dtype="uint16", name="A") b = te.placeholder((100,), dtype="uint16", name="B") c = te.compute((100,), fcompute_after(a, b), name="C") s = te.create_schedule(c.op) func = tvm.driver.build_module.schedule_to_module(s, [a, b, c], "main", None)["main"] tvm.ir.assert_structural_equal(stmt, func.body) def orig1(a, b): return lambda i: a[i] + b[i] + a[99 - i] + b[99 - i] def after1(a, b): return lambda i: to16(to32(a[i]) + to32(b[i]) + to32(a[99 - i]) + to32(b[99 - i])) def orig2(a, b): return lambda i: a[i] * b[i] + a[99 - i] * b[99 - i] + a[i] def after2(a, b): return lambda i: to16( to32(a[i]) * to32(b[i]) + to32(a[99 - i]) * to32(b[99 - i]) + to32(a[i]) ) check(orig1, after1) check(orig2, after2) if __name__ == "__main__": test_promote() test_eliminate() test_legalize()
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_for(): dev_type = te.var("dev_type") def device_context(dev_id): ctx = tvm.tir.call_extern("handle", "device_context", dev_type, dev_id) return tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx]) ib = tvm.tir.ir_builder.create() n = te.var("n") A = ib.allocate("float32", n, name="A", scope="global") with ib.for_range(0, n, name="i") as i: ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(0), A.asobject().data)) with ib.for_range(0, 10, name="j") as j: ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(1), A.asobject().data)) ib.emit(tvm.tir.call_extern("int32", "fadd", device_context(0), A.asobject().data)) body = ib.get() mod = tvm.IRModule({"func": tvm.tir.PrimFunc([dev_type, n], body)}) mod = tvm.tir.transform.CombineContextCall()(mod) assert mod["func"].body.value.dtype == "handle" assert mod["func"].body.body.value.dtype == "handle" if __name__ == "__main__": test_for()
import hashlib
import tvm
from tvm import auto_scheduler, te, topi
from tvm.ir.base import save_json
from tvm.ir.module import IRModule
from tvm.script
import tir as T def test_cse(): z1 = te.var("z1") z2 = te.var("z2") z3 = te.var("z3") i1 = te.var("i1") i2 = te.var("i2") x = te.var("x") y = te.var("y") a = te.var("a") b = te.var("b") dtype = "int32" buffer = tvm.tir.decl_buffer((50,), dtype) body = tvm.tir.LetStmt( z1, 1, tvm.tir.LetStmt( z2, 2, tvm.tir.SeqStmt( [ tvm.tir.BufferStore(buffer, z1 + z2, [i1]), tvm.tir.LetStmt( x, 1, tvm.tir.LetStmt( y, 1, tvm.tir.LetStmt( a, (x + y) + (z1 + z2), tvm.tir.LetStmt( b, (x + y) + z3, tvm.tir.BufferStore(buffer, a + b, [i2]) ), ), ), ), ] ), ), ) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, z3], body)) body = tvm.tir.transform.CommonSubexprElimTIR()(mod) tvm.transform.PrintIR()(body) body = body["main"].body assert body.var.name == "z1" assert body.value == 1 body = body.body assert body.var.name == "z2" assert body.value == 2 assert isinstance(body.body, tvm.tir.LetStmt) body = body.body cse_var_1 = body.var assert body.var.name == "cse_var_1" assert tvm.ir.structural_equal(body.value, z1 + z2) assert isinstance(body.body, tvm.tir.SeqStmt) body = body.body assert isinstance(body[0], tvm.tir.BufferStore) assert isinstance(body[1], tvm.tir.LetStmt) body = body[1] assert body.var.name == "x" assert body.value == 1 body = body.body assert body.var.name ==
"y" assert body.value == 1 assert isinstance(body.body, tvm.tir.LetStmt) body = body.body cse_var_2 = body.var assert body.var.name == "cse_var_2" assert tvm.ir.structural_equal(body.value, x + y) body = body.body body.var.name == "a" assert tvm.ir.structural_equal(body.value, cse_var_2 + cse_var_1) body = body.body body.var.name == "b" assert tvm.ir.structural_equal(body.value, cse_var_2 + z3) assert isinstance(body.body, tvm.tir.BufferStore) def test_cse_ifNode_1(): b = te.var("b") i1 = te.var("i1") i2 = te.var("i2") i3 = te.var("i3") y = te.var("y") z = te.var("z") dtype = "int32" buffer = tvm.tir.decl_buffer((50,), dtype) body = tvm.tir.LetStmt( b, 1, tvm.tir.IfThenElse( b, tvm.tir.SeqStmt( [tvm.tir.BufferStore(buffer, y + z, [i1]), tvm.tir.BufferStore(buffer, y + z, [i2])] ), tvm.tir.BufferStore(buffer, y, [i3]), ), ) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body)) body = tvm.tir.transform.CommonSubexprElimTIR()(mod) tvm.transform.PrintIR()(body) body = body["main"].body assert body.var.name == "b" assert body.value == 1 assert isinstance(body.body, tvm.tir.IfThenElse) body = body.body assert isinstance(body.then_case, tvm.tir.LetStmt) body = body.then_case assert body.var.name == "cse_var_1" assert tvm.ir.structural_equal(body.value, y + z) def test_cse_ifNode_2(): b = te.var("b") i1 = te.var("i1") i2 = te.var("i2") i3 = te.var("i3") y = te.var("y") z = te.var("z") dtype = "int32" buffer = tvm.tir.decl_buffer((50,), dtype) body = tvm.tir.LetStmt( b, 1, tvm.tir.IfThenElse( b, tvm.tir.SeqStmt( [
tvm.tir.BufferStore(buffer, y + z, [i1]), tvm.tir.BufferStore(buffer, y, [i2]), ] ), tvm.tir.BufferStore(buffer, y + z, [i3]), ), ) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body)) body = tvm.tir.transform.CommonSubexprElimTIR()(mod) tvm.transform.PrintIR()(body) body = body["main"].body assert isinstance(body, tvm.tir.LetStmt) assert body.var.name == "cse_var_1" assert tvm.ir.structural_equal(body.value, y + z) def test_cse_cascade(): i1 = te.var("i1") i2 = te.var("i2") i3 = te.var("i3") x = te.var("x") y = te.var("y") z = te.var("z") dtype = "int32" buffer = tvm.tir.decl_buffer((50,), dtype) body = tvm.tir.SeqStmt( [ tvm.tir.BufferStore(buffer, (x + y) + z, [i1]), tvm.tir.BufferStore(buffer, (x + y) + z, [i2]), tvm.tir.BufferStore(buffer, (x + y), [i3]), ] ) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, x, y, z], body)) body = tvm.tir.transform.CommonSubexprElimTIR()(mod) tvm.transform.PrintIR()(body) body = body["main"].body assert isinstance(body, tvm.tir.LetStmt) cse_var_2 = body.var assert body.var.name == "cse_var_2" assert tvm.ir.structural_equal(body.value, (x + y)) body = body.body assert isinstance(body, tvm.tir.LetStmt) cse_var_1 = body.var assert body.var.name == "cse_var_1" assert tvm.ir.structural_equal(body.value, cse_var_2 + z) body = body.body assert isinstance(body, tvm.tir.SeqStmt) assert isinstance(body[0], tvm.tir.BufferStore) assert isinstance(body[1], tvm.tir.BufferStore) assert isinstance(body[2], tvm.tir.BufferStore) store1 = body[0] store2 = body[1] store3 = body[2] assert tvm.ir.structural_equal(store1.value, cse_var_1) assert tvm.ir.structural_equal(store2.value, cse_var_1
) assert tvm.ir.structural_equal(store3.value, cse_var_2) def test_no_normalization_without_commoning(): x = te.var("x") y = te.var("y") z = te.var("z") a = te.var("a") body = tvm.tir.LetStmt(a, x + (y + z), tvm.tir.Evaluate(a)) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x, y, z], body)) body = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod) tvm.transform.PrintIR()(body) body = body["main"].body assert body.var.name == "a" assert tvm.ir.structural_equal(body.value, x + (y + z)) @T.prim_func def func_distributivity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None: B = T.buffer_decl((50,), "int32") B[i1] = x * (y + z) B[i2] = x * y + x * z @T.prim_func def func_distributivity_expected( i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 ) -> None: B = T.buffer_decl((50,), "int32") cse_var_1 = T.var("int32") with T.let(cse_var_1, x * y + x * z): B[i1] = cse_var_1 B[i2] = cse_var_1 @T.prim_func def func_associativity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None: B = T.buffer_decl((50,), "int32") B[i1] = (x + y) + z B[i2] = x + (y + z) @T.prim_func def func_associativity_expected( i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32 ) -> None: B = T.buffer_decl((50,), "int32") cse_var_1 = T.var("int32") with T.let(cse_var_1, (x + y) + z): B[i1] = cse_var_1 B[i2] = cse_var_1 def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) body = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod) tvm.transform.PrintIR()(body) tvm.ir.assert_structural_equal(body["main"], transformed) def test_semantic_equiv_distributivity(): _check(func_distributivity, func_distributivity_expected) def test_semantic_equiv_associativity(): _check(func_associativity, func_associativity_expected) def test_det
erministic_cse():
import random """Test deterministic allocation of CSE vars We expect something like result = (x + 1) + (x + 2) + (x + 3) + (x + 1) + (x + 2) + (x + 3) --> cse_var_3 = (x + 1) cse_var_2 = (x + 2) cse_var_1 = (x + 3) result = cse_var_3 + cse_var_2 + cse_var_1 + cse_var_3 + cse_var_2 + cse_var_1 """ NUM_TERMS = 10 REPEATS = 10 x = te.var("x") result = te.var("result") offsets = sorted([i + 1 for i in range(NUM_TERMS)]) inc1 = [(x + offsets[i]) for i in range(NUM_TERMS)] inc2 = [(x + offsets[i]) for i in range(NUM_TERMS)] expression = x for add in inc1 + inc2: expression = expression + add let_stmt = tvm.tir.LetStmt(result, expression, tvm.tir.Evaluate(result)) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x], let_stmt)) initial_hash = None for _ in range(REPEATS): body = tvm.tir.transform.CommonSubexprElimTIR()(mod) body = body["main"] json_val = save_json(body) json_hash = hashlib.sha256(json_val.encode()).hexdigest() if initial_hash is None: initial_hash = json_hash assert json_hash == initial_hash LOG_LINE = '{"i": [["[\\"conv2d_layer\\", 1, 7, 7, 512, 512, 3, 3, [1, 1], [1, 1]]", \ "llvm -keys=cpu -mcpu=broadwell -num-cores=2", \ [8, 64, 64, 0, 0, 0, 0, 0], "", 1, []], [[], [["CI", 5], \ ["SP", 3, 0, 1, [1, 1, 1], 1], ["SP", 3, 4, 512, [1, 32, 16], 1], \ ["SP", 3, 8, 7, [7, 1, 1], 1], ["SP", 3, 12, 7, [1, 1, 1], 1], \ ["SP", 3, 16, 512, [1], 1], ["SP", 3, 18, 3, [1], 1], ["SP", 3, 20, 3, [3], 1], \ ["RE", 3, [0, 4, 8, 12, 1, 5, 9, 13, 16, 18, 20, 2, 6, 10, 14, 17, 19, 21, 3, 7, \ 11, 15]], ["FSP", 6, 0, 1, 2], ["FSP", 6, 3, 2, 2], ["FSP", 6, 6, 3, 2], \ ["FSP", 6, 9, 4, 2], ["RE", 6, [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], \ ["CA", 3, 6, 7], ["CA", 1, 6, 5], ["FU", 6, [0, 1, 2, 3, 4, 5]], ["AN", 6, 0,
3], \ ["PR", 3, 0, "auto_unroll_max_step$512"], ["AN", 1, 3, 2], ["AN", 3, 21, 2], \ ["AN", 6, 6, 2]]]], "r": [[0.0331129], 0, 0.900362, 1647464342], "v": "v0.6"}\n' @auto_scheduler.register_workload def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding): data = te.placeholder((N, CI, H, W), name="data") kernel = te.placeholder((CO, CI, KH, KW), name="kernel") bias = te.placeholder((1, CO, 1, 1), name="bias") conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32") out = topi.nn.relu(conv + bias) return [data, kernel, bias, out] def test_deterministic_cse_2(): inp, inr = auto_scheduler.measure_record.load_record_from_string(LOG_LINE) inp = auto_scheduler.measure.recover_measure_input(inp, rebuild_state=True) initial_hash = None for _ in range(10): sch, args = inp.task.compute_dag.apply_steps_from_state(inp.state) ir_module = tvm.lower(sch, args) primfunc = ir_module["main"] json_str = save_json(primfunc) new_hash = hashlib.sha256(json_str.encode("utf-8")).hexdigest() if initial_hash is None: initial_hash = new_hash assert new_hash == initial_hash if __name__ == "__main__": test_cse() test_cse_ifNode_1() test_cse_ifNode_2() test_cse_cascade() test_no_normalization_without_commoning() test_semantic_equiv_distributivity() test_semantic_equiv_associativity() test_deterministic_cse() test_deterministic_cse_2()
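# --- Illustrative sketch (not part of the tests above) ----------------------
# The minimal pattern the CSE tests exercise: build a PrimFunc whose body
# repeats a subexpression, run CommonSubexprElimTIR, and the repeated term
# comes back bound to a cse_var_* let-binding.  `_demo_cse` is a name
# invented for this sketch.


def _demo_cse():
    x = te.var("x")
    y = te.var("y")
    out = te.var("out")
    body = tvm.tir.LetStmt(out, (x + y) * (x + y), tvm.tir.Evaluate(out))
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x, y], body))
    return tvm.tir.transform.CommonSubexprElimTIR()(mod)["main"]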
import tvm
import tvm.testing
from tvm import te
from tvm.script
import tir as T def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) mod = tvm.tir.transform.CompactBufferAllocation()(mod) mod = tvm.tir.transform.Simplify()(mod) transformed = tvm.tir.transform.Simplify()(tvm.IRModule.from_expr(transformed))["main"] tvm.ir.assert_structural_equal(mod["main"], transformed) @T.prim_func def elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] * 2.0 @T.prim_func def compacted_elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((1, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[0, j]) B[0, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] * 2.0 @T.prim_func def unschedulable_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16])
T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): T.evaluate(T.call_extern("dummy_extern_function", B.data, dtype="int32")) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): C[i, j] = B[i, j] * 2.0 @T.prim_func def param_buffer_access_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (20, 20), "float32") B = T.match_buffer(c, (20, 20), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(B[i, 0:16]) for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) B[i, j] = A[i, j] + 1.0 @T.prim_func def shared_mem_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((16, 16), "float32", scope="shared") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i0 * 8 + i1 * 4 + i2, j]) B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i0 * 8 + i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0 @T.prim_func def compacted_shared_mem_func(a: T.handle, c: T.handle) -> Non
e: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((8, 16), "float32", scope="shared") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i1 * 4 + i2, j]) B[i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i1 * 4 + i2, j] * 2.0 @T.prim_func def warp_mem_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((16, 16), "float32", scope="warp") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i0 * 8 + i1 * 4 + i2, j]) B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block():
T.reads(B[i0 * 8 + i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0 @T.prim_func def compacted_warp_mem_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((4, 16), "float32", scope="warp") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i2, j]) B[i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i2, j] * 2.0 @T.prim_func def symbolic_func(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (n * 8,), "float32") C = T.match_buffer(c, (n * 8,), "float32") for i in range(0, n): with T.block(): T.reads(A[i * 8 : i * 8 + 8]) T.writes(C[i * 8 : i * 8 + 8]) B = T.alloc_buffer((n * 8,), "float32") for j in range(0, 8): with T.block(): T.reads(A[i * 8 + j]) T.writes(B[i * 8 + j]) B[i * 8 + j] = A[i * 8 + j] + 1.0 for j in range(0, 8): with T.block(): T.reads(B[i * 8 + j]) T.writes(C[i * 8 + j])
C[i * 8 + j] = B[i * 8 + j] * 2.0 @T.prim_func def compacted_symbolic_func(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (n * 8,), "float32") C = T.match_buffer(c, (n * 8,), "float32") for i in range(0, n): with T.block(): T.reads(A[i * 8 : i * 8 + 8]) T.writes(C[i * 8 : i * 8 + 8]) B = T.alloc_buffer((T.min(n, 1) * 8,), "float32") for j in range(0, 8): with T.block(): T.reads(A[i * 8 + j]) T.writes(B[j]) B[j] = A[i * 8 + j] + 1.0 for j in range(0, 8): with T.block(): T.reads(B[j]) T.writes(C[i * 8 + j]) C[i * 8 + j] = B[j] * 2.0 @T.prim_func def complex_func(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (8, 8), "float32") C = T.match_buffer(c, (8, 8), "float32") for i in range(0, 8): with T.block(): T.reads(A[0, 8]) T.writes(C[0, 8]) B = T.alloc_buffer((8, 8), "float32") for j in range(0, 4): with T.block(): D = T.alloc_buffer((8, 8), "float32") T.reads(A[i, j]) T.writes(B[i, j]) for k in range(4, 8): D[k, j] = 1.0 for k in range(2, 4): B[i, j] = A[i, j] + D[k, j] for j in range(3, 5): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] for j in range(6, 8): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] @T.prim_func def compacted_complex_func(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (8, 8), "float32") C = T.match_buffer(c, (8, 8), "float32") f
or i in range(0, 8): with T.block(): T.reads(A[0, 8]) T.writes(C[0, 8]) B = T.alloc_buffer((1, 8), "float32") for j in range(0, 4): with T.block(): D = T.alloc_buffer((6, 1), "float32") T.reads(A[i, j]) T.writes(B[0, j]) for k in range(4, 8): D[k - 2, 0] = 1.0 for k in range(2, 4): B[0, j] = A[i, j] + D[k - 2, 0] for j in range(3, 5): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] for j in range(6, 8): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] @T.prim_func def match_buffer_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16)) C = T.match_buffer(c, (16, 16)) for i in range(0, 16): with T.block(): A0 = T.match_buffer(A[i, 0:16], (16)) C0 = T.match_buffer(C[i, 0:16], (16)) B = T.alloc_buffer((16, 16)) with T.block(): B0 = T.match_buffer(B[i, 0:16], (16)) for j in range(0, 16): with T.block(): A1 = T.match_buffer(A0[j], ()) B1 = T.match_buffer(B0[j], ()) B1[()] = A1[()] + 1.0 for j in range(0, 16): with T.block(): C1 = T.match_buffer(C0[j], ()) B2 = T.match_buffer(B[i, j], ()) C1[()] = B2[()] * 2.0 @T.prim_func def compacted_match_buffer_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16)) C = T.match_buffer(c, (16, 16)) for i in range(0, 16): with T.block(): A0 = T.match_buffer(A[i, 0:16], (16)) C0 = T.match_buffer(C[i, 0
:16], (16)) B = T.alloc_buffer((1, 16)) with T.block(): B0 = T.match_buffer(B[0, 0:16], (16)) for j in range(0, 16): with T.block(): A1 = T.match_buffer(A0[j], ()) B1 = T.match_buffer(B0[j], ()) B1[()] = A1[()] + 1.0 for j in range(0, 16): with T.block(): C1 = T.match_buffer(C0[j], ()) B2 = T.match_buffer(B[0, j], ()) C1[()] = B2[()] * 2.0 @T.prim_func def storage_align_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]}) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] * 2.0 @T.prim_func def compacted_storage_align_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((1, 16), strides=(31, 1), dtype="float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[0, j]) T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]}) B[0, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(
): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] * 2.0 @T.prim_func def padding_pattern_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (20, 20), "float32") with T.block(): B = T.alloc_buffer((20, 20), dtype="float32") for i, j in T.grid(16, 16): with T.block(): B[i, j] = A[i, j] for i, j in T.grid(20, 20): with T.block(): C[i, j] = T.if_then_else( 2 <= i and i < 18 and 2 <= j and j < 18, B[i - 2, j - 2], 0.0, dtype="float32", ) @T.prim_func def compacted_padding_pattern_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [16, 16], dtype="float32") C = T.match_buffer(c, [20, 20], dtype="float32") with T.block(): B = T.alloc_buffer([16, 16], dtype="float32") for i, j in T.grid(16, 16): with T.block(): B[i, j] = A[i, j] for i, j in T.grid(20, 20): with T.block(): C[i, j] = T.if_then_else( 2 <= i and i < 18 and 2 <= j and j < 18, B[i - 2, j - 2], 0.0, dtype="float32" ) @T.prim_func def padding_pattern_inlined(a: T.handle, b: T.handle) -> None: X = T.match_buffer(a, [224, 224], dtype="float32") Y = T.match_buffer(b, [224, 224], dtype="float32") cache = T.alloc_buffer([224, 224], dtype="float32") for h, w in T.grid(224, 224): with T.block("cache"): cache[h, w] = X[h, w] for h, w, kh, kw in T.grid(224, 224, 3, 3): with T.block("compute"): Y[h, w] = T.max( Y[h, w], T.if_then_else( T.likely(1 <= h + kh, dtype="bool") and T.likely(h + kh < 225, dtype="bool") and T.likely(1 <= w + kw, dtype="bool") and T.li
kely(w + kw < 225, dtype="bool"), cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32", ), ) @T.prim_func def compacted_padding_pattern_inlined( X: T.Buffer[(224, 224), "float32"], Y: T.Buffer[(224, 224), "float32"] ) -> None: cache = T.alloc_buffer([224, 224], dtype="float32") for h, w in T.grid(224, 224): with T.block("cache"): cache[h, w] = X[h, w] for h, w, kh, kw in T.grid(224, 224, 3, 3): with T.block("compute"): Y[h, w] = T.max( Y[h, w], T.if_then_else( T.likely(1 <= h + kh, dtype="bool") and T.likely(h + kh < 225, dtype="bool") and T.likely(1 <= w + kw, dtype="bool") and T.likely(w + kw < 225, dtype="bool"), cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32", ), ) @T.prim_func def mem_access_in_branch_func(a: T.handle) -> None: A = T.match_buffer(a, (224, 224), "float32") with T.block(): B1 = T.alloc_buffer((224, 224), dtype="float32") B2 = T.alloc_buffer((224, 224), dtype="float32") B3 = T.alloc_buffer((224, 224), dtype="float32") B4 = T.alloc_buffer((224, 224), dtype="float32") for i in range(0, 224): for j in range(0, 224): with T.block(): if i < 112 and j < 112: B1[i, j] = A[i, j] * 2.0 else: B2[i, j] = A[i, j] + 3.0 for i in range(0, 224): for j in range(0, 224): with T.block(): if i < 112 or j < 112: B3[i, j] = A[i, j] * 2.0 else: B4[i, j] = A[i, j] + 3.0 @T.prim_func def compacted_mem_access_in_branch_func(a: T.handle) -> None: A = T.match_buffer(a, [224, 224], dtype="
float32") with T.block(): B1 = T.alloc_buffer([112, 112], dtype="float32") B2 = T.alloc_buffer([224, 224], dtype="float32") B3 = T.alloc_buffer([224, 224], dtype="float32") B4 = T.alloc_buffer([112, 112], dtype="float32") for i, j in T.grid(224, 224): with T.block(): if i < 112 and j < 112: B1[i, j] = A[i, j] * 2.0 else: B2[i, j] = A[i, j] + 3.0 for i, j in T.grid(224, 224): with T.block(): if i < 112 or j < 112: B3[i, j] = A[i, j] * 2.0 else: B4[i - 112, j - 112] = A[i, j] + 3.0 @T.prim_func def opaque_access_annotated_func(a: T.handle) -> None: A = T.match_buffer(a, (1024,), "float32") with T.block(): B = T.alloc_buffer((1024,), dtype="float32") C = T.alloc_buffer((1024,), dtype="float32") for i in range(0, 512): with T.block(): T.reads([]) T.writes([]) T.evaluate(T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32")) B[i] = A[i] with T.block(): T.reads([B[i]]) T.writes([C[i : i + 9]]) T.evaluate(T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32")) C[i] = B[i] @T.prim_func def compacted_opaque_access_annotated_func(a: T.handle) -> None: A = T.match_buffer(a, (1024,), "float32") with T.block(): B = T.alloc_buffer((1024,), dtype="float32") C = T.alloc_buffer((520,), dtype="float32") for i in range(0, 512): with T.block(): T.reads([]) T.writes([]) T.evaluate(T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32")) B[i] = A[i] with T.block():
T.reads([B[i]]) T.writes([C[i : i + 9]]) T.evaluate(T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32")) C[i] = B[i] @T.prim_func def sparse_read_cache( A_data: T.Buffer[(819,), "float32"], B: T.Buffer[(128,), "float32"], A_indptr: T.Buffer[(129,), "int32"], A_indices: T.Buffer[(819,), "int32"], ) -> None: for i in T.serial(128): with T.block("rowsum_outer"): T.reads( A_indptr[i : i + 1], A_data[A_indptr[i] + 0 : A_indptr[i] + (A_indptr[i + 1] - A_indptr[i])], ) T.writes(B[i]) with T.block("rowsum_init"): T.reads() T.writes(B[i]) B[i] = T.float32(0) for k in T.serial(A_indptr[i + 1] - A_indptr[i]): with T.block(): T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i]) T.writes(B[i]) A_data_local = T.alloc_buffer([819], dtype="float32", scope="local") with T.block("A_data_cache_read"): T.reads(A_indptr[i], A_data[A_indptr[i] + k]) T.writes(A_data_local[A_indptr[i] + k]) A_data_local[A_indptr[i] + k] = A_data[A_indptr[i] + k] with T.block("rowsum_inner"): T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k]) T.writes(B[i]) B[i] = B[i] + A_data_local[A_indptr[i] + k] @T.prim_func def compacted_sparse_read_cache( A_data: T.Buffer[(819,), "float32"], B: T.Buffer[(128,), "float32"], A_indptr: T.Buffer[(129,), "int32"], A_indices: T.Buffer[(819,), "int32"], ) -> None: for i in T.serial(128): with T.block("rowsum_outer"): T.reads( A_indptr[i : i + 1], A_data[A_indptr[i] + 0 : A_indptr[i] + 0 + (A_indptr[i + 1] - A_indptr[i])], ) T
.writes(B[i]) with T.block("rowsum_init"): T.reads() T.writes(B[i]) B[i] = T.float32(0) for k in T.serial(A_indptr[i + 1] - A_indptr[i]): with T.block(): T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i]) T.writes(B[i]) A_data_local = T.alloc_buffer([1], dtype="float32", scope="local") with T.block("A_data_cache_read"): T.reads(A_indptr[i], A_data[A_indptr[i] + k]) T.writes(A_data_local[T.min(A_indptr[i] + k, 0)]) A_data_local[T.min(A_indptr[i] + k, 0)] = A_data[A_indptr[i] + k] with T.block("rowsum_inner"): T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k]) T.writes(B[i]) B[i] = B[i] + A_data_local[T.min(A_indptr[i] + k, 0)] @T.prim_func def narrow_shape(A: T.Buffer[(10,), "float32"], B: T.Buffer[(10,), "float32"]) -> None: B_cache = T.alloc_buffer(10, "float32") for j in T.serial(3): for k in T.serial(4): with T.block("B_cache"): T.where(j * 4 + k < 10) B_cache[j * 4 + k] = B[j] for i in T.serial(10): A[i] = B_cache[i] + T.float32(1) @T.prim_func def compacted_narrow_shape(A: T.Buffer[(10,), "float32"], B: T.Buffer[(10,), "float32"]) -> None: B_cache = T.alloc_buffer([10], dtype="float32") for j, k in T.grid(3, 4): with T.block("B_cache"): T.where(j * 4 + k < 10) T.reads(B[j]) T.writes(B_cache[j * 4 + k]) B_cache[j * 4 + k] = B[j] for i in T.serial(10): A[i] = B_cache[i] + T.float32(1) def test_elementwise(): _check(elementwise_func, compacted_elementwise_func) def test_unschedulable_block(): _check(unschedulable_func, unschedulable_func) def test_param_access(): _check(param_buffer_access_func, param_buf
fer_access_func) def test_shared_mem(): _check(shared_mem_func, compacted_shared_mem_func) def test_warp_mem(): _check(warp_mem_func, compacted_warp_mem_func) def test_symbolic(): _check(symbolic_func, compacted_symbolic_func) def test_complex(): _check(complex_func, compacted_complex_func) def test_match_buffer(): _check(match_buffer_func, compacted_match_buffer_func) def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.CompactBufferAllocation()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) def test_storage_align(): _check(storage_align_func, compacted_storage_align_func) def test_padding_pattern(): _check(padding_pattern_func, compacted_padding_pattern_func) def test_padding_pattern_inlined(): _check(padding_pattern_inlined, compacted_padding_pattern_inlined) def test_mem_access_in_branch_func(): _check(mem_access_in_branch_func, compacted_mem_access_in_branch_func) def test_opaque_access_annotated_func(): _check(opaque_access_annotated_func, compacted_opaque_access_annotated_func) def test_sparse_read_cache(): _check(sparse_read_cache, compacted_sparse_read_cache) def test_narrow_shape(): _check(narrow_shape, compacted_narrow_shape) def test_compact_with_let_binding(): @T.prim_func def func_with_let_binding(): A = T.alloc_buffer((64, 8), "float32") B = T.alloc_buffer((64, 8), "float32") C = T.alloc_buffer((8, 8), "float32") for rk in range(64): for rii, rjj in T.grid(8, 8): C[rii, rjj] = T.float32(0) for riijj in T.serial(8 * 8): rii: T.int32 = riijj // 8 rjj: T.int32 = riijj % 8 C[rii, rjj] += A[rk, rii] * B[rk, rjj] _check(func_with_let_binding, func_with_let_binding) @T.prim_func def func_with_non_index_let_bin
ding(): A = T.alloc_buffer((64), "float32") x1 = T.call_extern("get", dtype="float16") x2 = T.call_extern("get", dtype="float32") x3 = T.call_extern("get", dtype="float64") x4 = T.call_extern("get", dtype="uint8") x5 = T.call_extern("get", dtype="int32x16") x6 = T.call_extern("get", dtype="handle") x7 = T.call_extern("get", dtype="") for rk in range(64): A[rk] = T.call_extern("load_ptr", x1, x2, x3, x4, x5, x6, x7, dtype="float32") _check(func_with_non_index_let_binding, func_with_non_index_let_binding) def test_compact_spatial_tiled_pad_and_pooling(): @T.prim_func def spatial_tiled_pad_and_pooling( X: T.Buffer[(64, 112, 112), "int32"], Y: T.Buffer[(64, 56, 56), "int32"] ) -> None: for h_o, w_o in T.grid(14, 14): with T.block(): X_cache = T.alloc_buffer([112, 112, 64], dtype="int32") for ax0, ax1, ax2 in T.grid(64, 9, 9): with T.block("cache"): T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2) T.reads(X[ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2]) T.writes(X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0]) X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0] = X[ ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2 ] for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64): with T.block("compute"): T.reads( X_cache[(h_o * 4 + h_i) * 2 + kh - 1, (w_o * 4 + w_i) * 2 + kw - 1, c] ) T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c]) if kh == 0 and kw == 0: Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0 Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max( Y[h_o * 4 + h_i, w_o * 4 + w_i, c],
T.if_then_else( T.likely(1 <= (h_o * 4 + h_i) * 2 + kh, dtype="bool") and T.likely((h_o * 4 + h_i) * 2 + kh < 113, dtype="bool") and T.likely(1 <= (w_o * 4 + w_i) * 2 + kw, dtype="bool") and T.likely((w_o * 4 + w_i) * 2 + kw < 113, dtype="bool"), X_cache[ (h_o * 4 + h_i) * 2 + kh - 1, (w_o * 4 + w_i) * 2 + kw - 1, c, ], 0, dtype="int32", ), ) @T.prim_func def compacted_spatial_tiled_pad_and_pooling( X: T.Buffer[(64, 112, 112), "int32"], Y: T.Buffer[(64, 56, 56), "int32"] ) -> None: for h_o, w_o in T.grid(14, 14): with T.block(): T.reads(X[0:64, h_o * 8 - 1 : h_o * 8 + 8, w_o * 8 - 1 : w_o * 8 + 8]) T.writes(Y[h_o * 4 : h_o * 4 + 4, w_o * 4 : w_o * 4 + 4, 0:64]) X_cache = T.alloc_buffer([9, 9, 64], dtype="int32") for ax0, ax1, ax2 in T.grid(64, 9, 9): with T.block("cache"): T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2) T.reads(X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1]) T.writes( X_cache[ h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1, ax0, ] ) X_cache[ h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1, ax0, ] =
X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1] for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64): with T.block("compute"): T.reads( X_cache[ h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1, c, ] ) T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c]) if kh == 0 and kw == 0: Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0 Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max( Y[h_o * 4 + h_i, w_o * 4 + w_i, c], T.if_then_else( T.likely(1 <= h_o * 8 + h_i * 2 + kh, dtype="bool") and T.likely(1 <= w_o * 8 + w_i * 2 + kw, dtype="bool"), X_cache[ h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1, c, ], 0, dtype="int32", ), ) _check(spatial_tiled_pad_and_pooling, compacted_spatial_tiled_pad_and_pooling) def test_complex_case_1(): """Meta-schedule matmul case for compact shared A, B matrix""" @T.prim_func def func(A: T.Buffer[(960, 770), "float32"], B: T.Buffer[(770, 2304), "float32"], C: T.Buffer[(960, 2304), "float32"]) -> None: for bx in T.thread_binding(144, thread="blockIdx.x"): for vx in T.thread_binding(2, thread="vthread.x"): for tx_p in T.thread_binding(256, thread="threadIdx.x"): with T.block(): for k_
0 in T.serial(193): with T.block(): A_shared = T.alloc_buffer([960, 770], dtype="float32", scope="shared") B_shared = T.alloc_buffer([770, 2304], dtype="float32", scope="shared") for _u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(3): with T.block("A_shared"): T.where(bx A_shared[bx for _u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(4): with T.block("B_shared"): T.where(k_0 * 4 + ((_u * 256 + tx) * 4 + vec) B_shared[k_0 * 4 + (_u * 1024 + tx * 4 + vec) for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2): with T.block("update_update"): C[(((bx @T.prim_func def compacted_func(A: T.Buffer[(960, 770), "float32"], B: T.Buffer[(770, 2304), "float32"], C: T.Buffer[(960, 2304), "float32"]) -> None: for bx in T.thread_binding(144, thread="blockIdx.x"): for vx in T.thread_binding(2, thread="vthread.x"): for tx_p in T.thread_binding(256, thread="threadIdx.x"): with T.block(): for k_0 in T.serial(193): with T.block(): A_shared = T.alloc_buffer([128, 4], dtype="float32", scope="shared") B_shared = T.alloc_buffer([4, 128], dtype="float32", scope="shared"
) for v_u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(3): with T.block("A_shared"): T.where(bx A_shared[(tx * 3 + vec) for v_u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(4): with T.block("B_shared"): T.where(k_0 * 4 + tx B_shared[tx for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2): with T.block("update_update"): C[bx _check(func, compacted_func) def test_compact_dependent_buffer_indices(): """Check the upper bound on different indices could be independently estimated.""" @T.prim_func def diagonal_access(): for i in range(8): with T.block(): A = T.alloc_buffer((256, 256), "float32") for j, k in T.grid(8, 8): with T.block(): T.where(j * 8 + k < 60) A[i * 64 + j * 8 + k, i * 64 + j * 8 + k] = 1.0 @T.prim_func def diagonal_access_compacted() -> None: for i in T.serial(8): with T.block(): A = T.alloc_buffer([60, 60], dtype="float32") for j, k in T.grid(8, 8): with T.block(): T.where(j * 8 + k < 60) A[j * 8 + k, j * 8 + k] = 1.0 _check(diagonal_access, diagonal_access_compacted) def test_compact_dependent_buffer_indices_of_packed_mat
mul(): """Check the outer dimension of the packed M-dim should be compacted to 1 wrt split condition.""" @T.prim_func def nonuniform_packed_matmul_write_cache( A: T.Buffer[(1020, 64), "float32"], B: T.Buffer[(1000, 64), "float32"], C: T.Buffer[(1020, 1000), "float32"], ): for i0, i1 in T.grid(4, 1): with T.block(): C_local2 = T.alloc_buffer([4, 1, 16, 1000, 16], dtype="float32", scope="local") C_local1 = T.alloc_buffer([1020, 1000], dtype="float32", scope="local") for ax0, ax1, ax2 in T.grid(255, 1000, 64): with T.block("matmul"): if ax2 == 0: C_local1[i0 * 255 + ax0, ax1] = 0 C_local1[i0 * 255 + ax0, ax1] = ( C_local1[i0 * 255 + ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2] ) for ax0, ax1 in T.grid(255, 1000): with T.block("st1"): C_local2[ (i0 * 255 + ax0) 0, (i0 * 255 + ax0) % 255 ax1, (i0 * 255 + ax0) % 255 % 16, ] = C_local1[i0 * 255 + ax0, ax1] for ax0, ax1, ax2 in T.grid(16, 16, 1000): with T.block("st2"): T.where(ax0 * 16 + ax1 < 255) C[i0 * 255 + (ax0 * 16 + ax1), i1 * 1000 + ax2] = C_local2[ (i0 * 255 + ax0 * 16 + ax1) 0, (i0 * 255 + ax0 * 16 + ax1) % 255 i1 * 1000 + ax2, (i0 * 255 + ax0 * 16 + ax1) % 255 % 16, ] @T.prim_func def nonuniform_packed_matmul_write_cache_compacted( A: T.Buffer[(1020, 64), "float32"], B: T.Buffer[(1000, 64), "float32"], C
: T.Buffer[(1020, 1000), "float32"], ) -> None: for i0, i1 in T.grid(4, 1): with T.block(): C_local2 = T.alloc_buffer([1, 1, 15, 1000, 16], dtype="float32", scope="local") C_local1 = T.alloc_buffer([255, 1000], dtype="float32", scope="local") for ax0, ax1, ax2 in T.grid(255, 1000, 64): with T.block("matmul"): if ax2 == 0: C_local1[ax0, ax1] = 0 C_local1[ax0, ax1] = ( C_local1[ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2] ) for ax0, ax1 in T.grid(255, 1000): with T.block("st1"): C_local2[0, 0, ax0 // 16, ax1, ax0 % 16] = C_local1[ax0, ax1] for ax0, ax1, ax2 in T.grid(16, 16, 1000): with T.block("st2"): T.where(ax0 * 16 + ax1 < 255) C[i0 * 255 + ax0 * 16 + ax1, ax2] = C_local2[ (ax0 * 16 + ax1) // 255, 0, (ax0 * 16 + ax1) % 255 // 16, ax2, (ax0 * 16 + ax1) % 255 % 16, ] _check(nonuniform_packed_matmul_write_cache, nonuniform_packed_matmul_write_cache_compacted) if __name__ == "__main__": tvm.testing.main()
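The tests above all go through the _check() helper. As a quick standalone sketch of the same pipeline on the simplest possible input (illustrative only; _compact_demo and its toy PrimFunc are invented here, mirroring elementwise_func above, and are not part of the test file):

import tvm
from tvm.script import tir as T


@T.prim_func
def _compact_demo(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16,), "float32")
    C = T.match_buffer(c, (16,), "float32")
    for i in range(0, 16):
        with T.block():
            T.reads(A[i])
            T.writes(C[i])
            # Over-allocated: only B[i] is ever touched inside this block.
            B = T.alloc_buffer((16,), "float32")
            B[i] = A[i] + 1.0
            C[i] = B[i] * 2.0


# CompactBufferAllocation is expected to narrow B to a single element,
# mirroring the elementwise_func -> compacted_elementwise_func pair above.
mod = tvm.tir.transform.CompactBufferAllocation()(tvm.IRModule.from_expr(_compact_demo))
print(mod["main"])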
import tvm from tvm
import tir, te from tvm.script
import tir as T def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) mod = tvm.tir.transform.ConvertBlocksToOpaque()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed) @T.prim_func def elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): vi = T.axis.S(16, i) vj = T.axis.S(16, j) B[vi, vj] = A[vi, vj] + 1.0 for j in range(0, 16): with T.block(): vi = T.axis.S(16, i) vj = T.axis.S(16, j) C[vi, vj] = B[vi, vj] * 2.0 @T.prim_func def substituted_elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer([16, 16], "float32") for j in range(0, 16): with T.block(): T.reads([A[i, j]]) T.writes([B[i, j]]) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads([B[i, j]]) T.writes([C[i, j]]) C[i, j] = B[i, j] * 2.0 def test_elementwise(): _check(elementwise_func, substituted_elementwise_func) def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.ConvertBlo
cksToOpaque()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod)
class TestErrorIfPredicateUsesBlockVariables(tvm.testing.CompareBeforeAfter): transform = tvm.tir.transform.ConvertBlocksToOpaque() def before(A: T.Buffer[8, "int32"]): for i in T.serial(8): with T.block(): vi = T.axis.remap("S", [i]) T.where(vi < 6) T.evaluate(0) expected = tvm.TVMError if __name__ == "__main__": tvm.testing.main()
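For reference, a small usage sketch of the pass outside the _check() harness (illustrative only; it simply reuses the elementwise_func defined in this file):

def _opaque_demo():
    mod = tvm.IRModule.from_expr(elementwise_func)
    mod = tvm.tir.transform.ConvertBlocksToOpaque()(mod)
    # After the pass the blocks carry no iter_var bindings; the printed function
    # should match substituted_elementwise_func up to simplification.
    print(mod["main"])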
import pytest