# NOTE(review): the five lines below were a dataset-export artifact
# ("text / stringlengths 1 / 2.05k / | / ---|"), not Python source.
# Kept here as a comment so the file parses.
import tvm
from tvm.script |
import tir as T
from tvm.tir |
import stmt_functor
@T.prim_func
def fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2(placeholder_30: T.handle, placeholder_31: T.handle, placeholder_32: T.handle, T_cast_8: T.handle) -> None:
T.func_attr({"global_symbol": "fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", "tir.noalias": True})
placeholder_33 = T.match_buffer(placeholder_30, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_34 = T.match_buffer(placeholder_31, [3072], dtype="int16", elem_offset=0, align=64, offset_factor=1)
placeholder_35 = T.match_buffer(placeholder_32, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1)
T_cast_9 = T.match_buffer(T_cast_8, [12544], dtype="int16", elem_offset=0, align=64, offset_factor=1)
PaddedInput_3 = T.decl_buffer([150528], "int16")
for i0_i1_fused_3 in T.parallel(0, 28):
for i2_3, i3_3 in T.grid(28, 192):
PaddedInput_3[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3) ] = placeholder_33[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)]
for ax0_ax1_fused_ax2_fused_3 in T.parallel(0, 784):
for ax3_2 in T.serial(0, 16):
Conv2dOutput_3 = T.decl_buffer([1], "int32")
Conv2dOutput_3[0] = 0
for rc_3 in T.serial(0, 192):
Conv2dOutput_3[0] = (Conv2dOutput_3[0] + (T.cast(PaddedInput_3[((ax0_ax1_fused_ax2_fused_3*192) + rc_3)], "int32")*T.cast(placeholder_34[((rc_3*16) + ax3_2)], "int32")))
T_cast_9[((ax0_ax1_fused_ax2_fused_3*16) + ax3_2)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_3[0] + placeholder_35[ax3_2]), 1764006585, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
def test_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2():
    primfunc = fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2
    mod = tvm.IRModule.from_expr(primfunc)
    mod = tvm.tir.transform.ConvertForLoopsToSerial()(mod)
    def verify_serial_loops(stmt):
        if isinstance(stmt, tvm.tir.For):
            assert stmt.kind == tvm.tir.ForKind.SERIAL
    for _, primfunc in mod.functions.items():
        stmt_functor.post_order_visit(primfunc.body, verify_serial_loops)
if __name__ == "__main__":
    pytest.main([__file__])
import tvm
from tvm |
import te
tvm.ir.register_op_attr("tir.cop.coproc_sync", "TGlobalSymbol", "coproc_sync")
tvm.ir.register_op_attr("tir.cop.coproc_read_barrier", "TGlobalSymbol", "coproc_readb")
tvm.ir.register_op_attr("tir.cop.coproc_write_barrier", "TGlobalSymbol", "coproc_writeb")
tvm.ir.register_op_attr("tir.cop.coproc_dep_push", "TGlobalSymbol", "coproc_dep_push")
tvm.ir.register_op_attr("tir.cop.coproc_dep_pop", "TGlobalSymbol", "coproc_dep_pop")
def test_coproc_sync():
@tvm.register_func("tvm.info.mem.global.cache")
def meminfo_cache():
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=8,
max_simd_bits=32,
max_num_bits=128,
head_address=tvm.tir.call_extern("handle", "global_cache"),
)
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
cp = te.thread_axis((0, 1), "cop")
A = ib.allocate("float32", 128, name="A", scope="global.cache")
with ib.for_range(0, n, name="i") as i:
A[i] = A[i] + 1
with ib.for_range(0, 8, name="k") as k:
with ib.for_range(0, 10, name="j") as j:
ib.scope_attr(cp, "coproc_scope", 1)
A[j] = A[j + k * 10] + 2
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
body = stmt.body.body
blist = tvm.tir.stmt_list(body)
assert blist[1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_read_barrier"))
assert blist[1].value.args[3].value == 80
assert blist[-2].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_sync"))
assert blist[-1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_write_barrier"))
assert blist[-1].value.args[3].value == 10
def test_coproc_sync2():
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
cp = te.thread_axis((0, 1), "cop")
ty = te.thread_axis("cthread")
A = ib.allocate("float32", 128, name="A")
ib.scope_attr(ty, "virtual_thread", 2)
with ib.new_scope():
ib.scope_attr(cp |
, "coproc_scope", 2)
A[ty] = 0.0
with ib.for_range(0, n, name="i") as i:
with ib.new_scope():
ib.scope_attr(cp, "coproc_scope", 1)
A[ty] = 1.0
with ib.new_scope():
ib.scope_attr(cp, "coproc_scope", 2)
A[ty] = 1.0
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
def test_coproc_sync3():
def __check_list(tvm_array, py_list):
for ti, li in zip(tvm_array, py_list):
if ti.value != li:
return False
return True
ib = tvm.tir.ir_builder.create()
n = te.size_var("n")
cp = te.thread_axis((0, 1), "cop")
A = ib.allocate("float32", 128, name="A", scope="global.cache")
with ib.for_range(0, n, name="i") as i:
with ib.for_range(0, n, name="i") as j:
with ib.new_scope():
ib.scope_attr(cp, "coproc_scope", 1)
A[i] = 1.0
with ib.new_scope():
ib.scope_attr(cp, "coproc_scope", 2)
A[i] = 1.0
with ib.new_scope():
ib.scope_attr(cp, "coproc_scope", 3)
A[0] = 0.0
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
slist = tvm.tir.stmt_list(stmt[0].body)
push_st = slist[2]
slist = tvm.tir.stmt_list(slist[-1])
pop_st = slist[0].body[0]
assert push_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_push"))
assert __check_list(push_st.value.args, [2, 3])
assert pop_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_pop"))
assert __check_list(pop_st.value.args, [2, 3])
if __name__ == "__main__":
test_coproc_sync()
test_coproc_sync2()
test_coproc_sync3() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_decorate_device():
x = te.var("x")
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x)))
stmt = tvm.tir.transform.DecorateDeviceScope()(mod)["main"].body
assert stmt.attr_key == "device_scope"
if __name__ == "__main__":
test_decorate_device()
|
import numpy as np |
import tvm
from tvm |
import tir
from tvm.script |
import tir as T |
import tvm.testing
@tvm.script.ir_module
class Module4:
@T.prim_func
def constant1(a: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
B = T.alloc_buffer((10), "int32")
K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
for x in T.serial(0, 10):
B[x] = A[x] + K[x]
@T.prim_func
def constant2(a: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
B = T.alloc_buffer((10), "int32")
K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
for x in T.serial(0, 10):
B[x] = A[x] + K[x]
@T.prim_func
def constant3(a: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
B = T.alloc_buffer((10), "int32")
K_data = T.allocate_const([1, 2, 3, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K = T.buffer_decl(shape=(10), dtype="int32", data=K_data)
for x in T.serial(0, 10):
B[x] = A[x] + K[x]
def test_const_extraction():
mod = tvm.tir.transform.ExtractPrimFuncConstants()(Module4)
constants = mod.attrs["constants"]
assert len(constants) == 2
def _visit(stmt):
if isinstance(stmt, tvm.tir.AllocateConst):
assert np.array_equal(stmt.data.numpy(), constants[int(stmt.irmod_storage_idx)].numpy())
for n, f in mod.functions.items():
tvm.tir.stmt_functor.post_order_visit(f.body, _visit)
tvm.lower(mod)
if __name__ == "__main__":
tvm.testing.main() |
import tvm |
import tvm.testing
from tvm |
import te
from tvm.script |
import tir as T |
class BaseCompare(tvm.testing.CompareBeforeAfter):
transform = tvm.transform.Sequential(
[
tvm.tir.transform.FlattenBuffer(),
tvm.tir.transform.Simplify(),
]
) |
class TestElementwise(BaseCompare):
"""2-d buffers are flattened to 1-d"""
def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
for i in T.serial(0, 16):
B_new = T.decl_buffer([1, 16], "float32")
for j in T.serial(0, 16):
B_new[0, j] = A[i, j] + 1.0
for j in T.serial(0, 16):
C[i, j] = B_new[0, j] * 2.0
def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)
for i in T.serial(0, 16):
B_new_data = T.allocate([16], "float32", scope="global")
B_new = T.buffer_decl([16], "float32", scope="global", data=B_new_data)
for j in T.serial(0, 16):
B_new[j] = A[((i * 16) + j)] + 1.0
for j in T.serial(0, 16):
C[((i * 16) + j)] = B_new[j] * 2.0 |
class TestElementwiseWithoutDeclBuffer(BaseCompare):
"""2-d buffers are flattened to 1-d
Like TestElementwise, but the TIR doesn't have the DeclBuffer
node. The T.buffer_decl declaration applies only during the
parsing the TVMScript, and doesn't occur in the TIR itself. In
this case, the allocation should be assumed to be targeting flat
memory, and should be flattened to a 1-d allocation.
"""
def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
for i in T.serial(0, 16):
B_new_data = T.allocate([1, 16], "float32", "global")
B_new = T.buffer_decl([1, 16], "float32", data=B_new_data)
for j in T.serial(0, 16):
B_new[0, j] = A[i, j] + 1.0
for j in T.serial(0, 16):
C[i, j] = B_new[0, j] * 2.0
def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)
for i in T.serial(0, 16):
B_new_data = T.allocate([16], "float32", "global")
B_new = T.buffer_decl(16, "float32", data=B_new_data)
for j in T.serial(0, 16):
B_new[j] = A[((i * 16) + j)] + 1.0
for j in T.serial(0, 16):
C[((i * 16) + j)] = B_new[j] * 2.0 |
class TestGPU(BaseCompare):
"""Buffer flattening may have indices based on GPU thread vars"""
def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B = T.decl_buffer([1, 16], "float32", scope="local")
for j in range(0, 16):
B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
for j in range(0, 16):
C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
T.preflattened_buffer(A, (16, 16), dtype="float32", data=A.data)
T.preflattened_buffer(C, (16, 16), dtype="float32", data=C.data)
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B_data = T.allocate([16], "float32", scope="local")
B = T.buffer_decl([16], "float32", scope="local", data=B_data)
for j in range(0, 16):
B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + 1.0
for j in range(0, 16):
C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * 2.0 |
class TestSymbolic(BaseCompare):
"""Dynamically-sized arrrays are flattened"""
def before(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n, m), "float32")
C = T.match_buffer(c, (n, m), "float32")
for i in range(0, n):
B = T.decl_buffer([m], "float32")
for j in range(0, m):
B[j] = A[i, j] + 1.0
for j in range(0, m):
C[i, j] = B[j] * 2.0
def expected(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, n * m, "float32")
C = T.match_buffer(c, n * m, "float32")
T.preflattened_buffer(A, (n, m), "float32", data=A.data)
T.preflattened_buffer(C, (n, m), "float32", data=C.data)
for i in range(0, n):
B_data = T.allocate([m], "float32", scope="global")
B = T.buffer_decl([m], "float32", scope="global", data=B_data)
for j in range(0, m):
B[j] = A[i * m + j] + 1.0
for j in range(0, m):
C[i * m + j] = B[j] * 2.0 |
class TestMultiAlloc(BaseCompare):
"""If multiple allocations occur, all are flattened."""
def before(A: T.Buffer[(4, 32), "float32"], D: T.Buffer[(4, 32), "float32"]):
for i, j in T.grid(4, 32):
B = T.decl_buffer((4, 32), "float32", scope="global")
C = T.decl_buffer((4, 32), "float32", scope="global")
B[i, j] = A[i, j] + 1.0
C[i, j] = A[i, j] + B[i, j]
D[i, j] = C[i, j] * 2.0
def expected(A: T.Buffer[128, "float32"], D: T.Buffer[128, "float32"]):
T.preflattened_buffer(A, (4, 32), "float32", data=A.data)
T.preflattened_buffer(D, (4, 32), "float32", data=D.data)
for i, j in T.grid(4, 32):
B_data = T.allocate([128], "float32", scope="global")
B = T.buffer_decl([128], "float32", scope="global", data=B_data)
C_data = T.allocate([128], "float32", scope="global")
C = T.buffer_decl([128], "float32", scope="global", data=C_data)
B[i * 32 + j] = A[i * 32 + j] + 1.0
C[i * 32 + j] = A[i * 32 + j] + B[i * 32 + j]
D[i * 32 + j] = C[i * 32 + j] * 2.0 |
class TestStrided(BaseCompare):
"""Indices for flattened buffers use the specified striding."""
def before(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]):
for i0 in T.serial(4):
B = T.decl_buffer([4, 17], "float32")
B_1 = T.buffer_decl([4, 16], dtype="float32", data=B.data, strides=[17, 1])
for i1, j in T.grid(4, 16):
B_1[i1, j] = A[i0 * 4 + i1, j] + 1.0
for i1, j in T.grid(4, 16):
C[i0 * 4 + i1, j] = B_1[i1, j] * 2.0
def expected(A: T.Buffer[256, "float32"], C: T.Buffer[256, "float32"]):
T.preflattened_buffer(A, [16, 16], dtype="float32", data=A.data)
T.preflattened_buffer(C, [16, 16], dtype="float32", data=C.data)
for i0 in T.serial(0, 4):
B_new_data = T.allocate([68], "float32", scope="global")
B_new = T.buffer_decl([68], "float32", scope="global", data=B_new_data)
for i1 in T.serial(0, 4):
for j in T.serial(0, 16):
B_new[i1 * 17 + j] = A[i0 * 64 + i1 * 16 + j] + 1.0
for i1 in T.serial(0, 4):
for j in T.serial(0, 16):
C[i0 * 64 + i1 * 16 + j] = B_new[i1 * 17 + j] * 2.0 |
class TestBoolean(BaseCompare):
"""Boolean buffers should be replaced by a backing int8 array"""
def before(A: T.Buffer[10, "bool"], B: T.Buffer[10, "bool"]) -> None:
for i0 in T.serial(10):
B[i0] = A[i0]
def expected(A: T.Buffer[10, "int8"], B: T.Buffer[10, "int8"]) -> None:
T.preflattened_buffer(A, [10], dtype="bool", data=A.data)
T.preflattened_buffer(B, [10], dtype="bool", data=B.data)
for i0 in T.serial(10):
B[i0] = T.cast(T.cast(A[i0], "bool"), "int8") |
class TestLowerTE(BaseCompare):
"""FlattenBuffer should do nothing on TE-based functions"""
def before(self):
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
return mod["main"]
expected = before |
class TestFlattenInsideBlock(BaseCompare):
"""Flattening access inside a block flattens the accessed region."""
def before():
A = T.alloc_buffer([32, 32])
for i, j in T.grid(32, 32):
with T.block("block"):
T.reads(A[i, j])
T.evaluate(A[i, j])
def expected():
A = T.alloc_buffer([1024])
for i, j in T.grid(32, 32):
with T.block("block"):
T.reads(A[i * 32 + j])
T.evaluate(A[i * 32 + j]) |
class TestNoChangeTo2DPhysicalBuffer(BaseCompare):
"""Flattening preserves axis separators."""
def before():
A = T.alloc_buffer([32, 32], axis_separators=[1])
for i, j in T.grid(32, 32):
T.evaluate(A[i, j])
expected = before |
class TestFlattenAllocBufferWithAxisSeparators(BaseCompare):
"""Flattening preserves axis separators"""
def before():
A = T.alloc_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0, i1, i2, i3, i4, i5])
def expected():
A = T.alloc_buffer([30, 1001], axis_separators=[1])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5]) |
class TestFlattenDeclBufferWithAxisSeparators(BaseCompare):
"""Flattening preserves axis separators
Like TestFlattenAllocBufferWithAxisSeparators, but the allocations
is done using Allocate/DeclBuffer, rather than through
BlockNode::alloc_buffers.
"""
def before():
A = T.decl_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0, i1, i2, i3, i4, i5])
def expected():
A_data = T.allocate([30, 1001], dtype="float32", scope="global")
A = T.buffer_decl(
[30, 1001], dtype="float32", scope="global", axis_separators=[1], data=A_data
)
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5])
def test_lower_2d_physical_memory():
"""Axis separators should preserve 2-d buffers through lowering.
A catch-all test to ensure that defining axis_separators is
sufficient to maintain non-flat buffer descriptions through all
lowering steps.
"""
@T.prim_func
def func():
buf = T.alloc_buffer(
[1, 1],
dtype="int32",
scope="global",
axis_separators=[1],
)
buf[0, 0] = 0
lowered = tvm.lower(func)["main"]
assert isinstance(lowered.body, tvm.tir.Allocate)
assert list(lowered.body.extents) == [1, 1], (
"Non-flat buffer allocations, "
"marked by axis_separators, "
"flattened to flat memory allocation."
)
if __name__ == "__main__":
tvm.testing.main() |
import pytest |
import tvm
from tvm.script |
import tir as T |
import tvm.testing
def test_annotate_entry_func_single_primfunc():
@tvm.script.ir_module
class MockModule:
@T.prim_func
def func1(A: T.Buffer[(16,), "float32"]):
for i in T.serial(16):
if i == 5:
if i == 5:
A[i] = 0.0
mod = MockModule
assert mod
assert mod["func1"].attrs is None
after = tvm.tir.transform.AnnotateEntryFunc()(mod)
assert (
after["func1"].attrs
and "tir.is_entry_func" in after["func1"].attrs
and after["func1"].attrs["tir.is_entry_func"]
)
@tvm.script.ir_module
class MockModule:
@T.prim_func
def func1(A: T.Buffer[(16,), "float32"]):
for i in T.serial(16):
if i == 5:
if i == 5:
A[i] = 0.0
@T.prim_func
def func2(A: T.Buffer[(32,), "float32"]):
for i in T.serial(32):
if i == 15:
if i == 15:
A[i] = 0.0
@pytest.mark.xfail
def test_annotate_entry_func_multiple_primfunc():
mod = MockModule
assert mod
assert mod["func1"].attrs is None
assert mod["func2"].attrs is None
after = tvm.tir.transform.AnnotateEntryFunc()(mod)
def test_bind_target():
mod = MockModule
assert mod
target = tvm.target.Target("cuda")
assert mod["func1"].attrs is None
assert mod["func2"].attrs is None
after = tvm.tir.transform.BindTarget(target)(mod)
assert after["func1"].attrs and "target" in after["func1"].attrs
assert after["func1"].attrs["target"] == target
assert after["func2"].attrs and "target" in after["func2"].attrs
assert after["func2"].attrs["target"] == target
def test_filter_primfunc():
mod = MockModule
assert mod
mod["func1"] = mod["func1"].with_attr("temp", "test1")
mod["func2"] = mod["func2"].with_attr("temp", "test2")
def checker_filter_out_none(func: tvm.tir.PrimFunc):
return (func.attrs is not None) and ("temp" in func.attrs)
after |
= tvm.tir.transform.Filter(checker_filter_out_none)(mod)
assert len(after.functions) == 2
assert checker_filter_out_none(after["func1"])
assert checker_filter_out_none(after["func2"])
def checker_filter_out_one(func: tvm.tir.PrimFunc):
return (func.attrs is not None) and ("temp" in func.attrs) and func.attrs["temp"] == "test1"
after = tvm.tir.transform.Filter(checker_filter_out_one)(mod)
assert len(after.functions) == 1
assert checker_filter_out_one(after["func1"])
def checker_filter_out_both(func: tvm.tir.PrimFunc):
return (func.attrs is not None) and ("invalid_attr" in func.attrs)
after = tvm.tir.transform.Filter(checker_filter_out_both)(mod)
assert len(after.functions) == 0
if __name__ == "__main__":
tvm.testing.main() |
import tvm
from tvm |
import tir |
import tvm.testing
from tvm.script |
import tir as T
from tvm.tir.transform |
import HoistExpression, HoistedConditionals, HoistedLetBindings
class BaseBeforeAfter:
hoisted_conditionals = tvm.testing.parameter(HoistedConditionals.All)
hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.All)
def test_hoist(self, hoisted_conditionals, hoisted_let_bindings):
before = self.before
before_mod = tvm.IRModule.from_expr(before)
config = {
"tir.HoistExpression": {
"hoisted_conditionals": hoisted_conditionals.value,
"hoisted_let_bindings": hoisted_let_bindings.value,
}
}
with tvm.transform.PassContext(config=config):
after_mod = tvm.tir.transform.HoistExpression()(before_mod)
after = after_mod["main"]
expected = self.expected
try:
tvm.ir.assert_structural_equal(after, expected)
except ValueError as err:
script = tvm.IRModule({"expected": expected, "after": after, "before": before}).script()
raise ValueError(
f"Function after simplification did not match expected:\n{script}"
) from err |
class TestHoistToTop(BaseBeforeAfter):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.IfElseStmt,
HoistedConditionals.All,
)
@T.prim_func
def before(A: T.Buffer[(16,), "float32"], n: T.int32):
for i in T.serial(16):
if n != 0:
A[i] = 0.0
@T.prim_func
def expected(A: T.Buffer[(16,), "float32"], n: T.int32):
if n != 0:
for i in T.serial(16):
A[i] = 0.0 |
class TestSuppressHoistIfElse(BaseBeforeAfter):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.Never,
HoistedConditionals.IfElseExpr,
)
@T.prim_func
def before(A: T.Buffer[(16,), "float32"], n: T.int32):
for i in T.serial(16):
if n != 0:
A[i] = 0.0
expected = before |
class TestHoistBlockVar(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
i = T.env_thread("threadIdx.x")
T.launch_thread(i, 128)
for j in T.serial(16):
if i < 32:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
i = T.env_thread("threadIdx.x")
T.launch_thread(i, 128)
if i < 32:
for j in T.serial(16):
A[i, j] = 0.0 |
class TestSuppressHoistBlockVar(BaseBeforeAfter):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
)
@T.prim_func
def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
thread_x = T.env_thread("threadIdx.x")
T.launch_thread(thread_x, 128)
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
if i < 32:
for j in T.serial(16):
A[i, j] = 0.0
expected = before |
class TestHoistAcrossBlockVar(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
thread_x = T.env_thread("threadIdx.x")
T.launch_thread(thread_x, 128)
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
if n == 0:
for j in T.serial(16):
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
thread_x = T.env_thread("threadIdx.x")
if n == 0:
T.launch_thread(thread_x, 128)
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
for j in T.serial(16):
A[i, j] = 0.0 |
class TestSuppressHoistAcrossBlockVar(BaseBeforeAfter):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
)
@T.prim_func
def before(A: T.Buffer[(128, 16), "float32"], n: T.int32):
thread_x = T.env_thread("threadIdx.x")
T.launch_thread(thread_x, 128)
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
for j in T.serial(16):
if n == 0:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(128, 16), "float32"], n: T.int32):
thread_x = T.env_thread("threadIdx.x")
T.launch_thread(thread_x, 128)
if n == 0:
for i in T.thread_binding(0, 128, thread="threadIdx.x"):
for j in T.serial(16):
A[i, j] = 0.0 |
class TestHoistToMiddle(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
if i < 3:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 3:
for j in T.serial(4):
A[i, j] = 0.0 |
class TestHoistWithLet(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
condition = i < 3
if condition:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
condition = i < 3
if condition:
for j in T.serial(4):
A[i, j] = 0.0 |
class TestHoistDisableLet(BaseBeforeAfter):
"""As TestHoistWithLet, but forbid hoisting of LetStmt
Because the condition depends on the let binding, it should no
longer be hoisted.
"""
hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.Never)
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
condition = i < 3
if condition:
A[i, j] = 0.0
expected = before |
class TestHoistIfElse(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
if i < 3:
A[i, j] = 0.0
else:
A[i, j] = 1.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 3:
for j in T.serial(4):
A[i, j] = 0.0
else:
for j in T.serial(4):
A[i, j] = 1.0 |
class TestHoistSequentialAssign(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
if i < 3:
A[i, j] = 0.0
B[i, j] = 0.0
else:
A[i, j] = 1.0
B[i, j] = 1.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"], B: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 3:
for j in T.serial(4):
A[i, j] = 0.0
B[i, j] = 0.0
else:
for j in T.serial(4):
A[i, j] = 1.0
B[i, j] = 1.0 |
class TestHoistMultiIf(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
for k in T.serial(4):
if j < 3:
if i < 2:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 2:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 0.0 |
class TestHoistComplexConditional(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j, k in T.grid(4, 4, 4):
if j < 3 and i < 2:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 2:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 0.0 |
class TestSuppressSplittingConditional(BaseBeforeAfter):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.All & ~HoistedConditionals.BooleanExpression
)
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j, k in T.grid(4, 4, 4):
if j < 3 and i < 2:
A[i, j] = 0.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i, j in T.grid(4, 4):
if j < 3 and i < 2:
for k in T.serial(4):
A[i, j] = 0.0 |
class TestHoistMultiIfElse(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
for k in T.serial(4):
if j < 3:
if i < 2:
A[i, j] = 0.0
else:
A[i, j] = 1.0
else:
if i < 2:
A[i, j] = 2.0
else:
A[i, j] = 3.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 2:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 0.0
else:
for k in T.serial(4):
A[i, j] = 2.0
else:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 1.0
else:
for k in T.serial(4):
A[i, j] = 3.0 |
class TestHoistMultiIfElseDifferentBranches(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
for j in T.serial(4):
for k in T.serial(4):
if j < 3:
if i < 2:
A[i, j] = 0.0
else:
A[i, j] = 1.0
else:
if i < 1:
A[i, j] = 2.0
else:
A[i, j] = 3.0
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 2:
if i < 1:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 0.0
else:
for k in T.serial(4):
A[i, j] = 2.0
else:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 0.0
else:
for k in T.serial(4):
A[i, j] = 3.0
else:
for j in T.serial(4):
if j < 3:
for k in T.serial(4):
A[i, j] = 1.0
else:
for k in T.serial(4):
A[i, j] = 3.0 |
class TestHoistIfElseExpr(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j in T.grid(4, 4):
A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
if i < 2:
for j in T.serial(4):
A[i, j] = 1.0
else:
for j in T.serial(4):
A[i, j] = 2.0 |
class TestSuppressHoistIfElseExpr(TestHoistIfElseExpr):
hoisted_conditionals = tvm.testing.parameter(
HoistedConditionals.All & ~HoistedConditionals.IfElseExpr
)
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j in T.grid(4, 4):
A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")
expected = before |
class TestHoistLetExpr(BaseBeforeAfter):
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j in T.grid(4, 4):
x = T.var("float32")
A[i, j] = T.Let(x, T.cast(i + 1, "float32"), 5.0 * x + T.cast(j, "float32"))
@T.prim_func
def expected(A: T.Buffer[(4, 4), "float32"]):
for i in T.serial(4):
x = T.cast(i + 1, "float32")
for j in T.serial(4):
A[i, j] = 5.0 * x + T.cast(j, "float32") |
class TestSuppressHoistLetExpr(BaseBeforeAfter):
hoisted_let_bindings = tvm.testing.parameter(
HoistedLetBindings.All & ~HoistedLetBindings.LetExpr
)
@T.prim_func
def before(A: T.Buffer[(4, 4), "float32"]):
for i, j in T.grid(4, 4):
x = T.var("float32")
A[i, j] = T.Let(x, T.cast(i + 1, "float32"), 5.0 * x + T.cast(j, "float32"))
expected = before
if __name__ == "__main__":
tvm.testing.main() |
import tvm
from tvm |
import te
from tvm |
import relay |
import numpy as np |
import pytest
from tvm.testing |
import enabled_targets
var_list = []
def verify_structure(stmt, expected_struct):
node_dict = {}
struct = {}
def _extract_vars(op):
global var_list
if isinstance(op, tvm.tir.Var):
var_list.append(op.name)
def _visit(op):
key = op
if isinstance(op, tvm.tir.IfThenElse):
global var_list
tvm.tir.stmt_functor.post_order_visit(op.condition, _extract_vars)
val = [(op.then_case, op.else_case), ("tir.IfThenElse", tuple(var_list))]
var_list.clear()
elif isinstance(op, tvm.tir.For):
val = [(op.body,), ("tir.For", op.loop_var.name)]
elif isinstance(op, tvm.tir.AttrStmt):
val = [(op.body,), ("tir.AttrStmt", op.attr_key, int(op.value))]
else:
return
node_dict[key] = val
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
for key, val in node_dict.items():
struct[val[1]] = tuple(
node_dict[child][1] if child in node_dict else None for child in val[0]
)
assert struct == expected_struct, "Structure mismatch: expect %s but got %s" % (
expected_struct,
struct,
)
var_list.clear()
def _opaque_eval(var):
return tvm.tir.Evaluate(tvm.tir.call_extern("int32", "dummy", var))
def test_hoist_top_for():
ib = tvm.tir.ir_builder.create()
l = te.var("l")
m = te.var("m")
n = te.var("n")
data = ib.pointer("float32", name="data")
with ib.for_range(0, l, "i") as i:
with ib.for_range(0, m, "j") as j:
with ib.for_range(0, n, "k") as k:
with ib.if_scope(ib.likely(i < 2)):
ib.emit(_opaque_eval(m))
with ib.else_scope():
ib.emit(_opaque_eval(n))
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
expected_struct = {
("tir.For", "k"): (None,),
("tir.For", "j"): (("tir.For", |
"k"),),
("tir.IfThenElse", ("i",)): (("tir.For", "j"), ("tir.For", "j")),
("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
}
verify_structure(new_stmt, expected_struct)
def test_hoist_multi_var_if():
ib = tvm.tir.ir_builder.create()
l = te.var("l")
m = te.var("m")
n = te.var("n")
data = ib.pointer("float32", name="data")
with ib.for_range(0, l, "i") as i:
with ib.for_range(0, m, "j") as j:
with ib.for_range(0, n, "k") as k:
with ib.if_scope(ib.likely(i + j < 2)):
ib.emit(_opaque_eval(m))
with ib.else_scope():
ib.emit(_opaque_eval(n))
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
new_mod = tvm.tir.transform.HoistIfThenElse()(mod)
new_stmt = new_mod["main"].body
expected_struct = {
("tir.For", "k"): (None,),
("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
("tir.For", "i"): (("tir.For", "j"),),
}
verify_structure(new_stmt, expected_struct)
def test_hoist_no_match_for():
    """The `j` loop body mixes a data store with the `k` loop, so the i-based
    IfThenElse cannot be hoisted above `j`; it ends up just above the `k` loop."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            data[i * 3 + j] = data[i * 3 + j] + 0.5
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.For", "i"): (("tir.For", "j"),),
    }
    verify_structure(new_stmt, expected_struct)
def test_no_else():
    """An if_scope with no else branch still hoists: the resulting IfThenElse
    has (then=For j, else=None) directly under the `i` loop."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "k"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_attr_stmt():
    """Hoisting under thread_extent AttrStmts: the (i, j)-dependent IfThenElse
    moves above the `k` loop, while the two AttrStmt wrappers stay outermost."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.5
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
        ("tir.For", "i"): (("tir.For", "j"),),
        ("tir.AttrStmt", "thread_extent", 64): (("tir.For", "i"),),
        ("tir.AttrStmt", "thread_extent", 32): (("tir.AttrStmt", "thread_extent", 64),),
    }
    verify_structure(new_stmt, expected_struct)
def test_nested_for():
    """Two nested if levels: the outer i-based if hoists above the `j` loop,
    and the inner (i, j)-based if hoists above the `k`/`l` nest."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (("tir.For", "l"),),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_if_block():
    """Several ifs in one body plus a second independent loop nest: the
    (i, j)-if and the j-if stay leaves (no single-child chain to hoist along),
    while the i-if and the loop-invariant n-if are hoisted."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    n = te.var("n")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
                        with ib.if_scope(j < 5):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] - 1
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 15, "k") as k:
                with ib.if_scope(n >= 3):
                    data[i * 3 + j + k] = data[i * 3 + j + k] + 0.6
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.IfThenElse", ("i", "j")): (None, None),
        ("tir.IfThenElse", ("j",)): (None, None),
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "j"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
        ("tir.IfThenElse", ("n",)): (("tir.For", "j"), None),
    }
    verify_structure(new_stmt, expected_struct)
def test_multi_if():
    """Two stacked if_scopes: each hoists directly under the loop whose
    variable it tests (the i-if under For i, the j-if under For j)."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(3 <= i):
                    with ib.if_scope(3 <= j):
                        data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_mod = tvm.tir.transform.HoistIfThenElse()(mod)
    new_stmt = new_mod["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("j",)): (("tir.For", "k"), None),
        ("tir.For", "j"): (("tir.IfThenElse", ("j",)),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_no_hoisting_1():
    """The condition k <= 3 depends on the innermost loop variable, so the
    pass must leave the statement unchanged in both the default and the
    block-scope-hoisting configurations."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(k <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_2():
    """The if body shares its scope with an unconditional store of the same
    element, so the i-based if cannot be hoisted; the statement must stay
    unchanged in both configurations."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(i <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.3
                data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_3():
    """Re-binds tx/bx with different (inner) thread_extents inside the loop
    nest; expects the pass to be a no-op in both modes.  Marked strict xfail
    because the inconsistent thread_extent makes the IR invalid."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                ib.scope_attr(bx, "thread_extent", dshape_inner[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_4():
    """Only blockIdx.x is bound at the top; threadIdx.x gets its thread_extent
    inside the `k` loop.  The tx-dependent if stays put (no structural change
    asserted) in both the default and block-scope-hoisting modes."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_5():
    """Like test_no_hoisting_3 but the inner bx re-bind sits between the `j`
    and `k` loops; expects a no-op in both modes.  Marked strict xfail because
    the inconsistent thread_extent makes the IR invalid."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_6():
    """The condition (tx + k) < 3 depends on the innermost loop variable `k`,
    so nothing can be hoisted in either mode."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + k) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_7():
    """Nested ifs on (tx + j) and (tx + k): each already sits directly under
    the loop of its innermost variable, so the pass changes nothing in either
    mode."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.if_scope((tx + j) < 9):
                with ib.for_range(0, n, "k") as k:
                    with ib.if_scope((tx + k) < 3):
                        data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_1():
    """rfactor + thread-bound reduction schedule: the default pass is a no-op,
    but enabling support_block_scope_hosting changes the lowered body."""
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "k")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
    BF = s.rfactor(B, ki)
    xo, xi = s[B].split(s[B].op.axis[0], factor=32)
    s[B.op].bind(xo, te.thread_axis("blockIdx.x"))
    s[B.op].bind(xi, te.thread_axis("threadIdx.y"))
    s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x"))
    s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B], "main", None)
    # Normalize first so the structural comparison only reflects hoisting.
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_2():
    """tx is bound at the top but bx's thread_extent sits inside the `k` loop:
    the default pass leaves the body alone, while block-scope hoisting
    rewrites it."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(bx, "thread_extent", dshape[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    # Normalize first so the structural comparison only reflects hoisting.
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_hoisting_block_scope_3():
    """Inner tx/bx re-binds between the loops: expects no change by default
    and a change with block-scope hoisting.  Marked strict xfail because the
    inconsistent thread_extent makes the IR invalid."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            ib.scope_attr(tx, "thread_extent", dshape_inner[0])
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_4():
    """CPU schedule with parallel/pragma/vectorize on a split axis: the
    default pass keeps the lowered body, block-scope hoisting changes it."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    AA = te.compute((n,), lambda *i: A(*i), name="A")
    BB = te.compute((n,), lambda *i: B(*i), name="B")
    T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
    C = te.compute(A.shape, lambda *i: T(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    xo1, xo2 = s[C].split(xo, factor=13)
    s[C].parallel(xo2)
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xo2, "parallel_stride_pattern")
    s[C].pragma(xo2, "parallel_barrier_when_finish")
    s[C].vectorize(xi)
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    mod = tvm.tir.transform.Simplify()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_5():
    """The condition data[g] < 3 is loop-invariant, so the default pass
    already hoists (body changes); re-running with block-scope hoisting on the
    hoisted body is then a no-op."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data", scope="global")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    g = te.var("g")
    ib.scope_attr(data, "storage_scope", "global")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(data[g] < 3):
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 0.3
                with ib.else_scope():
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
    # Feed the already-hoisted body back in: a second pass must be stable.
    stmt = new_stmt
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_6():
    """The condition (tx + n) < 3 is loop-invariant but uses the thread var
    tx: only the block-scope-hoisting mode moves it."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + n) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_7():
    """The condition (tx + i) < 3 depends on the outer loop var `i` and the
    thread var tx: only the block-scope-hoisting mode moves it."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + i) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
        assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.skip()
def test_hoisting_op_conv():
    """Manual benchmark (skipped in CI): builds a relay conv2d, times it with
    and without block-scope hoisting, and checks the mean runtimes agree
    within tolerance."""
    dtype = "float32"
    dshape = (1, 80, 73, 73)
    kshape = (192, 80, 3, 3)
    padding = (1, 1)
    groups = 1
    dilation = (1, 1)
    kernel_size = (3, 3)
    channels = 192
    scale = 1
    x = relay.var("x", shape=dshape, dtype=dtype)
    w = relay.var("w", shape=kshape, dtype=dtype)
    y = relay.nn.conv2d(
        x,
        w,
        padding=padding,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
    )
    func = relay.Function([x, w], y)
    mod = tvm.IRModule()
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
    kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
    params = {"w": tvm.nd.array(kernel)}
    for target, dev in enabled_targets():
        # Baseline: default lowering without block-scope hoisting.
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            x = np.random.uniform(size=dshape)
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t1 = e(data_tvm).results
            t1 = np.array(t1) * 1000
            print("{} ms".format(t1.mean()))
        # Same workload with block-scope hoisting enabled.
        with tvm.transform.PassContext(
            opt_level=3, config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
        ):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            x = np.random.uniform(size=dshape)
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.set_input(**params)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t2 = e(data_tvm).results
            t2 = np.array(t2) * 1000
            print("{} ms".format(t2.mean()))
        tvm.testing.assert_allclose(t1.mean(), t2.mean(), atol=1, rtol=1e-1)
# Running this file directly delegates to pytest.
if __name__ == "__main__":
    pytest.main([__file__])
import tvm |
import tvm.testing
from tvm |
import te
from tvm.driver.build_module |
import schedule_to_module
def test_copy2d():
    """Lower a plain 2-D copy tagged with the "memcpy" pragma and check the
    buffers handed to the InjectCopyIntrin callback: contiguous rows (inner
    stride 1), row stride l, full (m, l) source shape."""
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute((m, l), lambda i, j: A[i, j], name="B")
    s = te.create_schedule(B.op)
    s[B].pragma(B.op.axis[0], "memcpy")
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, B], stmt, None)
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        # Callback invoked by InjectCopyIntrin; the asserts are the test.
        assert dst.strides[0] == l
        assert dst.strides[1].value == 1
        assert src.strides[0] == l
        assert tuple(src.shape) == (m, l)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_copy_pad():
    """Copy with one padded row on each side (valid region i in [1, m]): the
    callback must see pad_before=(1, 0), pad_after=(1, 0) and pad value 1.0."""
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute(
        (m + 2, l),
        lambda i, j: tvm.tir.if_then_else(tvm.tir.all(i >= 1, i < m + 1), A[i - 1, j], 1.0),
        name="B",
    )
    s = te.create_schedule(B.op)
    s[B].pragma(B.op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        # Callback invoked by InjectCopyIntrin; the asserts are the test.
        tvm.testing.assert_prim_expr_equal(src.elem_offset, 0)
        assert pad_before[0].value == 1
        assert pad_before[1].value == 0
        assert pad_after[0].value == 1
        assert pad_after[1].value == 0
        assert pad_value.value == 1.0
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_single_point_test():
    """Degenerate one-element copy: offsets are 0 and both strides are 1."""
    A = te.placeholder((1,), name="A")
    B = te.compute((1,), lambda i: A[i], name="B")
    s = te.create_schedule(B.op)
    s[B].pragma(B.op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)

    def cb(src, dst, pad_before, pad_after, pad_value):
        # Callback invoked by InjectCopyIntrin; the asserts are the test.
        tvm.testing.assert_prim_expr_equal(src.elem_offset, 0)
        tvm.testing.assert_prim_expr_equal(dst.elem_offset, 0)
        tvm.testing.assert_prim_expr_equal(src.strides[0], 1)
        tvm.testing.assert_prim_expr_equal(dst.strides[0], 1)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
def test_copy_pad_split():
    """Padded copy computed at a split loop: padding amounts become
    expressions of the outer split index xo, checked symbolically."""
    m = 4 * 3
    A = te.placeholder((m,), name="A")
    Apad = te.compute(
        (m + 2,), lambda i: tvm.tir.if_then_else(tvm.tir.all(i >= 1, i <= m), A[i - 1], 0.0), "Apad"
    )
    B = te.compute((m,), lambda i: Apad[i] + Apad[i + 1] + Apad[i + 2])
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=4)
    s[Apad].compute_at(s[B], xo)
    s[Apad].pragma(s[Apad].op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
    mod = tvm.tir.transform.Simplify()(mod._move())

    def cb(src, dst, pad_before, pad_after, pad_value):
        # Callback invoked by InjectCopyIntrin; the asserts are the test.
        assert dst.elem_offset.value == 0
        tvm.testing.assert_prim_expr_equal(src.elem_offset, tvm.te.max(xo * 4, 1) - 1)
        # Padding depends on which tile xo selects (first/last need padding).
        rpad_before = tvm.te.max(1 - xo * 4, 0)
        rpad_after = tvm.te.max(xo * 4 - 7, 0)
        tvm.testing.assert_prim_expr_equal(pad_before[0], rpad_before)
        tvm.testing.assert_prim_expr_equal(pad_after[0], rpad_after)
        tvm.testing.assert_prim_expr_equal(src.shape[0], 6 - rpad_before - rpad_after)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
# Run the copy-intrin tests directly when invoked as a script.
if __name__ == "__main__":
    test_copy2d()
    test_copy_pad()
    test_copy_pad_split()
    test_single_point_test()
import tvm
from tvm |
import te
def test_double_buffer():
    """InjectDoubleBuffer with split_loop=2 must double the "B" allocation
    (two ping-pong halves) and, after ThreadSync("shared"), leave exactly
    four tvm_storage_sync calls in the body."""
    n = 100
    m = 4
    tx = te.thread_axis("threadIdx.x")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    ib.scope_attr(tx, "thread_extent", 1)
    with ib.for_range(0, n) as i:
        # B is refilled from A and consumed into C every iteration; marking
        # it with double_buffer_scope lets the pass overlap fill and use.
        B = ib.allocate("float32", m, name="B", scope="shared")
        with ib.new_scope():
            ib.scope_attr(B.asobject().data, "double_buffer_scope", 1)
            with ib.for_range(0, m) as j:
                B[j] = A[i * 4 + j]
        with ib.for_range(0, m) as j:
            C[j] = B[j] + 1
    stmt = ib.get()
    mod = tvm.IRModule({"db": tvm.tir.PrimFunc([A.asobject(), C.asobject()], stmt)})
    opt = tvm.transform.Sequential(
        [tvm.tir.transform.InjectDoubleBuffer(), tvm.tir.transform.Simplify()]
    )
    with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}):
        mod = opt(mod)
    stmt = mod["db"].body
    # The double-buffered allocation is twice the original extent.
    assert isinstance(stmt.body, tvm.tir.Allocate)
    assert list(stmt.body.extents) == [m * 2]
    f = tvm.tir.transform.ThreadSync("shared")(mod)["db"]
    count = [0]

    def count_sync(op):
        if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")):
            count[0] += 1

    tvm.tir.stmt_functor.post_order_visit(f.body, count_sync)
    assert count[0] == 4
# Run the double-buffer test directly when invoked as a script.
if __name__ == "__main__":
    test_double_buffer()
import tvm
from tvm.script |
import tir as T |
import numpy as np |
import tvm.testing
def count_cp_async(stmt):
    """Count the tir.ptx_cp_async calls appearing anywhere in *stmt*."""
    counter = [0]

    def _tally(node):
        if isinstance(node, tvm.tir.Call) and str(node.op) == "tir.ptx_cp_async":
            counter[0] = counter[0] + 1

    tvm.tir.stmt_functor.post_order_visit(stmt, _tally)
    return counter[0]
def generate_global_to_shared_vectorized_copy(dtype, vector_size):
    """Build a TVMScript kernel that copies a (32, 128) *dtype* buffer to
    shared memory with *vector_size*-wide vectorized inner loops, then copies
    it back out.  The async_scope attr marks the copy for InjectPTXAsyncCopy.
    """
    # Cover each of the 128 row elements exactly once:
    # num_iters * vector_size == 128.  (A flat 128 here would index past the
    # 128-wide buffer whenever vector_size > 1.)
    num_iters = 128 // vector_size
    vector_size_expr = tvm.runtime.convert(vector_size)

    @T.prim_func
    def ptx_global_to_shared_copy(
        A: T.Buffer[(32, 128), dtype], B: T.Buffer[(32, 128), dtype]
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        bx = T.env_thread("blockIdx.x")
        tx = T.env_thread("threadIdx.x")
        T.launch_thread(bx, 1)
        T.launch_thread(tx, 32)
        with T.block():
            A_shared = T.alloc_buffer([32, 128], dtype, scope="shared")
            T.reads(A[0:32, 0:128])
            T.writes(B[0:32, 0:128])
            T.attr("default", "async_scope", 1)
            for i in T.serial(num_iters):
                for j in T.vectorized(vector_size):
                    A_shared[tx, i * vector_size_expr + j] = A[tx, i * vector_size_expr + j]
            T.evaluate(T.ptx_commit_group(dtype=""))
            T.evaluate(T.ptx_wait_group(0, dtype=""))
            for i in range(128):
                B[tx, i] = A_shared[tx, i]

    return ptx_global_to_shared_copy
# Handwritten TVMScript kernel: each of 32 threads copies its 128-element row
# of A into shared memory with scalar (non-vectorized) loads, then writes it
# back out to B.  The async_scope attr marks the copy loop for
# InjectPTXAsyncCopy.
@T.prim_func
def ptx_global_to_shared_copy_fp32x1(
    A: T.Buffer[(32, 128), "float32"], B: T.Buffer[(32, 128), "float32"]
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float32", scope="shared")
        T.reads(A[0:32, 0:128])
        T.writes(B[0:32, 0:128])
        T.attr("default", "async_scope", 1)
        for i in T.serial(128):
            A_shared[tx, i] = A[tx, i]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            B[tx, i] = A_shared[tx, i]
# TVMScript kernel using dynamic shared memory (shared.dyn): each thread copies
# its rows of A and B with 8-wide vectorized loads (16 iterations x 8 lanes =
# 128 elements), then writes A + B into C.  The async_scope attr marks both
# copies for InjectPTXAsyncCopy.
@T.prim_func
def ptx_global_to_shared_dyn_copy_fp16x8(
    A: T.Buffer[(32, 128), "float16"],
    B: T.Buffer[(32, 128), "float16"],
    C: T.Buffer[(32, 128), "float16"],
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        B_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        T.reads(A[0:32, 0:128], B[0:32, 0:128])
        T.writes(C[0:32, 0:128])
        T.attr("default", "async_scope", 1)
        for i in T.serial(16):
            for j in T.vectorized(8):
                A_shared[tx, i * 8 + j] = A[tx, i * 8 + j]
                B_shared[tx, i * 8 + j] = B[tx, i * 8 + j]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            C[tx, i] = A_shared[tx, i] + B_shared[tx, i]
@tvm.testing.requires_cuda
def test_inject_async_copy():
    """For several dtype/vector-size combos, lower the copy kernel and check
    InjectPTXAsyncCopy produces exactly one cp.async; on Ampere+ also build
    for CUDA and verify the copy numerically."""
    for dtype, vec_size in [("float16", 8), ("float16", 4), ("float32", 4), ("float32", 1)]:
        if vec_size == 1:
            f = ptx_global_to_shared_copy_fp32x1
        else:
            f = generate_global_to_shared_vectorized_copy(dtype, vec_size)
        mod = tvm.IRModule.from_expr(f)
        mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
        mod = tvm.tir.transform.FlattenBuffer()(mod)
        if vec_size > 1:
            mod = tvm.tir.transform.VectorizeLoop()(mod)
        mod = tvm.tir.transform.InjectPTXAsyncCopy()(mod)
        assert count_cp_async(mod["main"].body) == 1
        # The runtime check needs cp.async hardware support.
        if not tvm.testing.is_ampere_or_newer():
            continue
        with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
            mod = tvm.build(tvm.IRModule.from_expr(f), target="cuda")
        A_np = np.random.rand(32, 128).astype(dtype)
        B_np = np.zeros((32, 128)).astype(dtype)
        dev = tvm.cuda(0)
        A_nd = tvm.nd.array(A_np, device=dev)
        B_nd = tvm.nd.array(B_np, device=dev)
        mod(A_nd, B_nd)
        tvm.testing.assert_allclose(B_nd.numpy(), A_np)
@tvm.testing.requires_cuda
def test_inject_async_copy_shared_dyn():
    """Two shared.dyn copies merged into one dynamic allocation must yield two
    cp.async calls; on Ampere+ also build for CUDA and verify C == A + B."""
    f = ptx_global_to_shared_dyn_copy_fp16x8
    mod = tvm.IRModule.from_expr(f)
    mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
    mod = tvm.tir.transform.FlattenBuffer()(mod)
    mod = tvm.tir.transform.VectorizeLoop()(mod)
    mod = tvm.tir.transform.MergeDynamicSharedMemoryAllocations()(mod)
    mod = tvm.tir.transform.InjectPTXAsyncCopy()(mod)
    assert count_cp_async(mod["main"].body) == 2
    # The runtime check needs cp.async hardware support.
    if not tvm.testing.is_ampere_or_newer():
        return
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        mod = tvm.build(tvm.IRModule.from_expr(f), target="cuda")
    A_np = np.random.rand(32, 128).astype("float16")
    B_np = np.random.rand(32, 128).astype("float16")
    C_np = np.zeros((32, 128)).astype("float16")
    dev = tvm.cuda(0)
    A_nd = tvm.nd.array(A_np, device=dev)
    B_nd = tvm.nd.array(B_np, device=dev)
    C_nd = tvm.nd.array(C_np, device=dev)
    mod(A_nd, B_nd, C_nd)
    tvm.testing.assert_allclose(C_nd.numpy(), A_np + B_np)
# Run the async-copy tests directly when invoked as a script.
if __name__ == "__main__":
    test_inject_async_copy()
    test_inject_async_copy_shared_dyn()
import tvm |
import tvm.script
from tvm.script |
import tir as T
from tvm |
import te
from tvm |
import topi
from tvm.driver.build_module |
import get_binds |
import numpy as np |
# NOTE(review): the following three lines are dataset-page scraping residue,
# not code; commented out so the file stays parseable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.