def _assign_targets_to_primfuncs_irmodule(mod, target):
ret = tvm.IRModule()
for global_var, basefunc in mod.functions.items():
if isinstance(basefunc, tvm.tir.PrimFunc):
ret[global_var] = basefunc.with_attr("target", target)
return ret
def test_create_array_buffer_info():
target = Target("c")
global_ws_pool = WorkspacePoolInfo(
"global_workspace",
[target],
)
fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
tir_mod = LinearStructure
tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_ws_pool])
main_func = tir_mod["tvmgen_default_run_model"]
buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
buffer_info_array = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
for buffer_info in buffer_info_array:
assert buffer_info in buffer_info_analysis.buffer_info_stmts.keys()
if __name__ == "__main__":
pytest.main([__file__] + sys.argv[1:])
import functools
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.tir.stmt_functor import post_order_visit
from tvm.driver.build_module import schedule_to_module
dtype = tvm.testing.parameter("int32")
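# Helpers that map between an N-d logical index and its row-major flattened position.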
def flatten_all_indices(preflatten_shape):
def mapping(*indices):
output = 0
for index, size in zip(indices, preflatten_shape):
output = output * size + index
return [output]
return mapping
def unpack_flattened_indices(preflatten_shape):
def mapping(i):
output = []
for dim in reversed(preflatten_shape):
output.append(i % dim)
i //= dim
return output[::-1]
return mapping
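# Visit `op` and all of its transitive input ops exactly once, calling `callback` on each.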
def traverse(s, op, callback):
visited = set()
def _traverse(op):
if op in visited:
return
visited.add(op)
for tensor in op.input_tensors:
_traverse(tensor.op)
callback(op)
_traverse(op)
class TestCompareAgainstExplicitReshape:
A_definition_style = tvm.testing.parameter(
"explicit_reshape",
"transform_layout",
)
B_definition_style = tvm.testing.parameter(
"explicit_reshape",
"transform_layout",
)
reordered_shape = tvm.testing.parameter((2, 3, 4))
@tvm.testing.fixture
def n_items(self, reordered_shape):
return functools.reduce(lambda x, y: x * y, reordered_shape, 1)
@tvm.testing.fixture
def fphysical_layout(self, reordered_shape):
return unpack_flattened_indices(reordered_shape)
@tvm.testing.fixture
def fcompute(self, A_definition_style, B_definition_style, reordered_shape, n_items, dtype):
assert A_definition_style in ["explicit_reshape", "transform_layout"]
assert B_definition_style in ["explicit_reshape", "transform_layout"]
def func():
if A_definition_style == "explicit_reshape":
A_input = te.placeholder(shape=reordered_shape, name="A_input", dtype=dtype)
A = te.compute(
shape=(n_items,),
fcompute=lambda i: A_input[
i // (reordered_shape[1] * reordered_shape[2]),
(i // reordered_shape[2]) % reordered_shape[1],
i % reordered_shape[2],
],
name="A",
)
elif A_definition_style == "transform_layout":
A = te.placeholder(shape=(n_items,), name="A", dtype=dtype)
A_input = A
B = te.compute(shape=A.shape, fcompute=lambda i: A[i], name="B")
if B_definition_style == "explicit_reshape":
B_output = te.compute(
shape=reordered_shape,
fcompute=lambda i, j, k: B[
i * reordered_shape[1] * reordered_shape[2] + j * reordered_shape[2] + k
],
name="B_output",
)
elif B_definition_style == "transform_layout":
B_output = B
return A_input, B_output
return func
@tvm.testing.fixture
def fschedule(self, A_definition_style, B_definition_style, fphysical_layout):
def func(outs):
outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
s = te.create_schedule([x.op for x in outs])
def callback(op):
if (op.name == "A" and A_definition_style == "transform_layout") or (
op.name == "B" and B_definition_style == "transform_layout"
):
s[op].transform_layout(fphysical_layout)
traverse(s, outs[0].op, callback)
return s
return func
@tvm.testing.parametrize_targets("llvm")
def test_external_reshape(
self, target, dev, fcompute, fschedule, n_items, reordered_shape, dtype
):
A, B = fcompute()
s = fschedule(B)
func = tvm.build(s, [A, B], target=target, name="copy_reshape")
a_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
b_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)
func(a, b)
tvm.testing.assert_allclose(b.numpy(), b_np)
@tvm.testing.parametrize_targets("llvm")
def test_internal_reshape(self, target, dev, n_items, reordered_shape, dtype, fphysical_layout):
logical_shape = (n_items,)
A = te.placeholder(logical_shape, name="A", dtype=dtype)
B = te.compute(shape=logical_shape, fcompute=lambda i: A[i], name="B")
C = te.compute(shape=logical_shape, fcompute=lambda i: B[i], name="C")
s = te.create_schedule(C.op)
s[B].transform_layout(fphysical_layout)
mod = schedule_to_module(s, [A, C])
body = mod["main"].body
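# Apply `callback` to every BufferLoad/BufferStore/BufferRealize node that touches buffer "B".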
def walk_buffer_interactions(stmt, callback):
buffer_classes = [
tvm.tir.BufferLoad,
tvm.tir.BufferStore,
tvm.tir.BufferRealize,
]
def inner(node):
if (type(node) in buffer_classes) and node.buffer.name == "B":
callback(node)
post_order_visit(stmt, inner)
def check_references():
buffer_object = None
def inner(node):
nonlocal buffer_object
if buffer_object is None:
buffer_object = node.buffer
else:
assert node.buffer.same_as(buffer_object)
return inner
def check_shape(expected_shape):
def inner(node):
assert tuple(node.buffer.shape) == expected_shape
return inner
walk_buffer_interactions(body, check_references())
walk_buffer_interactions(body, check_shape(logical_shape))
mod = tvm.tir.transform.ApplyLayoutTransforms()(mod)
body = mod["main"].body
walk_buffer_interactions(body, check_references())
walk_buffer_interactions(body, check_shape(reordered_shape))
class Test2DPhysicalLayout:
transform_A = tvm.testing.parameter(
"1d_A",
"2d_A",
"2d_rev_A",
"3d_A",
)
transform_B = tvm.testing.parameter(
"1d_B",
"2d_B",
"2d_rev_B",
"3d_B",
)
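# Collect the loop variables of all For nodes in the lowered body, ordered by increasing extent.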
@staticmethod
def extract_logical_indices(stmt):
output = {}
def callback(node):
if isinstance(node, tvm.tir.For):
output[node.loop_var] = node.extent.value
post_order_visit(stmt, callback)
return sorted(output, key=output.get)
def get_transform(self, name):
name = name[:-2]
if name == "1d":
return None
elif name == "2d":
return lambda i, j, k: [i, j, te.AXIS_SEPARATOR, k]
elif name == "2d_rev":
return lambda i, j, k: [k, j, te.AXIS_SEPARATOR, i]
elif name == "3d":
return lambda i, j, k: [i, te.AXIS_SEPARATOR, j, te.AXIS_SEPARATOR, k]
else:
raise ValueError(f"Unknown transformation: {name}")
def transform_indices(self, name, logical_shape, logical_index_vars):
name = name[:-2]
i, j, k = logical_index_vars
if name == "1d":
return [i * (logical_shape[1] * logical_shape[2]) + j * logical_shape[2] + k]
elif name == "2d":
return [i * logical_shape[1] + j, k]
elif name == "2d_rev":
return [k * logical_shape[1] + j, i]
elif name == "3d":
return [i, j, k]
else:
raise ValueError(f"Unknown transformation: {name}")
def test_2d_physical(self, dtype, transform_A, transform_B):
logical_shape = (2, 3, 4)
A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")
s = te.create_schedule(B.op)
func = self.get_transform(transform_A)
if func:
s[A].transform_layout(func)
func = self.get_transform(transform_B)
if func:
s[B].transform_layout(func)
with tvm.transform.PassContext(disabled_pass=["tir.CommonSubexprElimTIR"]):
mod = tvm.lower(s, [A, B])
logical_index_vars = self.extract_logical_indices(mod["main"].body)
expected_indices_A = self.transform_indices(transform_A, logical_shape, logical_index_vars)
expected_indices_B = self.transform_indices(transform_B, logical_shape, logical_index_vars)
def callback(node):
if type(node) in [tvm.tir.BufferLoad, tvm.tir.BufferStore]:
name = node.buffer.name
if name == "A":
expected_indices = expected_indices_A
elif name == "B":
expected_indices = expected_indices_B
else:
raise RuntimeError(f"Unexpected buffer: {name}")
tvm.ir.assert_structural_equal(expected_indices, node.indices)
post_order_visit(mod["main"].body, callback)
class TestTransformedSchedules:
logical_shape = tvm.testing.parameter((4, 6, 40))
transform_names = [
None,
"reverse",
"flatten_all",
"factor_last_by_4",
]
transform_A = tvm.testing.parameter(by_dict={f"A_{t}": t for t in transform_names})
transform_B = tvm.testing.parameter(
by_dict={f"B_{t}": t for t in transform_names if t is not None}
)
after_transform = tvm.testing.parameter(None)
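# Return the index-remapping callable passed to transform_layout for each named transform.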
def make_transform(self, logical_shape, transform_name):
if transform_name is None:
return lambda *indices: indices
elif transform_name == "reverse":
return lambda *indices: indices[::-1]
elif transform_name == "flatten_all":
return flatten_all_indices(logical_shape)
elif transform_name == "factor_last_by_4":
return lambda *indices, n: [*indices, n // 4, n % 4]
else:
raise NotImplementedError(f"Unknown transformation {transform_name}")
def make_transformed_shape(self, logical_shape, transform_name):
if transform_name is None:
return logical_shape
elif transform_name == "reverse":
return logical_shape[::-1]
elif transform_name == "flatten_all":
num_elements = functools.reduce(lambda x, y: x * y, logical_shape, 1)
return [num_elements]
elif transform_name == "factor_last_by_4":
*indices, n = logical_shape
return [*indices, n // 4, 4]
else:
raise NotImplementedError(f"Unknown transformation {transform_name}")
@tvm.testing.fixture
def expected_loop_order(self, logical_shape, transform_B, after_transform):
shape = self.make_transformed_shape(logical_shape, transform_B)
if after_transform == "reorder":
shape = shape[::-1]
elif after_transform == "split":
shape = [
*shape[:-1],
2,
shape[-1] // 2,
]
elif after_transform == "fuse":
fused_size = shape[0] if transform_B == "flatten_all" else shape[0] * shape[1]
shape = [fused_size, *shape[2:]]
return shape
@tvm.testing.fixture
def schedule(self, logical_shape, dtype, transform_A, transform_B, after_transform):
A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")
s = te.create_schedule(B.op)
if transform_A:
s[A].transform_layout(self.make_transform(logical_shape, transform_A))
iter_vars = s[B].transform_layout(self.make_transform(logical_shape, transform_B))
iter_vars = list(iter_vars)
if after_transform == "reorder":
s[B].reorder(*iter_vars[::-1])
elif after_transform == "split":
s[B].split(iter_vars[-1], nparts=2)
elif after_transform == "fuse":
to_fuse = iter_vars[:2]
s[B].fuse(*iter_vars[:2])
return {
"schedule": s,
"tensors": [A, B],
"iter_vars": iter_vars,
}
def compare_tir_loop_order(self, stmt, expected_loop_order):
def collect_loops(node):
output = []
def callback(node):
if isinstance(node, tvm.tir.For):
output.append(node)
post_order_visit(node, callback)
return output[::-1]
loops = collect_loops(stmt)
loop_order = [loop.extent for loop in loops]
np.testing.assert_array_equal(loop_order, expected_loop_order)
def test_tir_loop_order(self, schedule, expected_loop_order):
func = tvm.lower(schedule["schedule"], schedule["tensors"])["main"]
self.compare_tir_loop_order(func.body, expected_loop_order)
def test_te_loop_order(self, schedule, expected_loop_order):
s = schedule["schedule"]
A, B = schedule["tensors"]
iter_vars = schedule["iter_vars"]
extents = [int(iter_var.dom.extent) for iter_var in s[B].leaf_iter_vars]
np.testing.assert_array_equal(extents, expected_loop_order)
extents = [int(iter_var.dom.extent) for iter_var in iter_vars]
np.testing.assert_array_equal(extents, expected_loop_order)
@pytest.mark.parametrize("after_transform", ["reorder", "split", "fuse"])
def test_use_transformed_axes(
self, schedule, expected_loop_order, transform_A, transform_B, after_transform
):
s = schedule["schedule"]
A, B = schedule["tensors"]
func = tvm.lower(s, [A, B])["main"]
self.compare_tir_loop_order(func.body, expected_loop_order)
class TestTransformCache:
A_size = tvm.testing.parameter(16)
transform_A = tvm.testing.parameter(by_dict={"transformA": True, "": False})
transform_B = tvm.testing.parameter(by_dict={"transformB": True, "": False})
cache_A = tvm.testing.parameter(by_dict={"cacheA": True, "": False})
cache_B = tvm.testing.parameter(by_dict={"cacheB": True, "": False})
@tvm.testing.fixture
def schedule_args(self, target, A_size, transform_A, transform_B, cache_A, cache_B, dtype):
A = te.placeholder(shape=[A_size], dtype=dtype, name="A")
B = te.compute(A.shape, lambda i: A[i], name="B")
s = te.create_schedule(B.op)
requires_thread_bind = "gpu" in tvm.target.Target(target).keys
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_z = te.thread_axis("threadIdx.z")
if cache_A:
AA = s.cache_read(A, "shared", [B])
if requires_thread_bind:
s[AA].bind(AA.op.axis[0], thread_x)
if cache_B:
BB = s.cache_write(B, "shared")
if requires_thread_bind:
s[BB].bind(BB.op.axis[0], thread_y)
if transform_A:
A_axis = s[A].transform_layout(lambda i: [i // 4, i % 4])
if transform_B:
B_axis = s[B].transform_layout(lambda i: [i // 4, i % 4])
else:
B_axis = B.op.axis
if requires_thread_bind:
s[B].bind(B_axis[0], thread_z)
return [s, [A, B]]
@tvm.testing.fixture
def ref_data(self, A_size, dtype, transform_A, transform_B):
a_np = (100 * np.random.uniform(size=A_size)).astype(dtype)
b_np = a_np
if transform_A:
a_np = a_np.reshape((-1, 4))
if transform_B:
b_np = b_np.reshape((-1, 4))
return a_np, b_np
def test_lower(self, schedule_args):
tvm.lower(*schedule_args)
def test_execute(self, target, dev, schedule_args, ref_data, dtype):
func = tvm.build(*schedule_args, target=target)
a_np, b_np = ref_data
a = tvm.nd.array(a_np, dev)
b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)
func(a, b)
if "int" in dtype:
np.testing.assert_equal(b.numpy(), b_np)
else:
tvm.testing.assert_allclose(b.numpy(), b_np)
def test_transform_with_reduction():
A = te.placeholder([16, 32, 64], dtype="float32", name="A")
k = te.reduce_axis((0, A.shape[-1]), name="k")
B = te.compute(A.shape[:-1], lambda i, j: te.sum(A[i, j, k], axis=[k]))
s = te.create_schedule(B.op)
s[B].transform_layout(lambda i, j: [j, i])
tvm.lower(s, [A, B])
shape, transform = tvm.testing.parameters(
([1, 8], lambda n, i: [i, n]),
([1, 1, 8], lambda i, j, k: [j, te.AXIS_SEPARATOR, i, k]),
([1, 1, 8], lambda i, j, k: [i, te.AXIS_SEPARATOR, j, k]),
)
def test_size_one_buffer(shape, transform):
dtype = "int8"
A = te.placeholder(shape, dtype, name="A")
B = te.compute(
shape=A.shape,
fcompute=lambda *indices: A[indices].astype(dtype),
name="B",
)
s = te.create_schedule(B.op)
s[B].transform_layout(transform)
def test_non_divisible_transform_raises_error():
A = te.placeholder([1, 3, 8, 8])
B = te.compute(A.shape, lambda *indices: A[indices])
s = te.create_schedule(B.op)
transform = lambda n, c, h, w: [n, c // 4, h, w, c % 4]
with pytest.raises(tvm.TVMError):
s[B].transform_layout(transform)
if __name__ == "__main__":
tvm.testing.main()
import tvm
import tvm.testing
from tvm.script import tir as T, ir_module
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
def transform(self):
return lambda x: x
class TestBeforeAfterPrimFunc(BaseBeforeAfter):
@T.prim_func
def before():
T.evaluate(0)
expected = before
class TestBeforeAfterMethod(BaseBeforeAfter):
def before(self):
@T.prim_func
def func():
T.evaluate(0)
return func
expected = before
class TestBeforeAfterFixture(BaseBeforeAfter):
@tvm.testing.fixture
def before(self):
@T.prim_func
def func():
T.evaluate(0)
return func
expected = before
class TestBeforeAfterDelayedPrimFunc(BaseBeforeAfter):
def before():
T.evaluate(0)
expected = before
class TestBeforeAfterParametrizedFixture(BaseBeforeAfter):
n = tvm.testing.parameter(1, 8, 16)
@tvm.testing.fixture
def before(self, n):
@T.prim_func
def func(A: T.Buffer[n, "float32"]):
for i in T.serial(n):
A[i] = 0.0
return func
expected = before
class TestBeforeAfterIRModule(BaseBeforeAfter):
"""The preferred form for writing TIR unit tests
All evaluation is done at test-time, with the minimal amount of
additional lines. The `@tvm.testing.fixture`, `@ir_module`, and
`@T.prim_func` annotations are handled by
`tvm.testing.CompareBeforeAfter`.
"""
class before:
def func_A(A: T.Buffer[16, "float32"]):
for i in T.serial(16):
A[i] = 0.0
def func_B(A: T.Buffer[16, "int32"]):
for i in T.serial(16):
A[i] = 42
expected = before
class TestBeforeAfterIRModuleExplicitFixture(BaseBeforeAfter):
"""Like TestBeforeAfterIRModule, but with an explicit fixture
If the IRModule depends on additional fixtures, this form can be
used.
"""
@tvm.testing.fixture
def before(self):
@ir_module
class mod:
@T.prim_func
def func_A(A: T.Buffer[16, "float32"]):
for i in T.serial(16):
A[i] = 0.0
@T.prim_func
def func_B(A: T.Buffer[16, "int32"]):
for i in T.serial(16):
A[i] = 42
return mod
expected = before
if __name__ == "__main__":
tvm.testing.main()
import os
import sys
import pytest
import tvm.testing
class TestTargetAutoParametrization:
targets_used = []
devices_used = []
enabled_targets = [target for target, dev in tvm.testing.enabled_targets()]
enabled_devices = [dev for target, dev in tvm.testing.enabled_targets()]
def test_target_parametrization(self, target):
assert target in self.enabled_targets
self.targets_used.append(target)
def test_device_parametrization(self, dev):
assert dev in self.enabled_devices
self.devices_used.append(dev)
def test_all_targets_used(self):
assert sorted(self.targets_used) == sorted(self.enabled_targets)
def test_all_devices_used(self):
sort_key = lambda dev: (dev.device_type, dev.device_id)
assert sorted(self.devices_used, key=sort_key) == sorted(self.enabled_devices, key=sort_key)
targets_with_explicit_list = []
@tvm.testing.parametrize_targets("llvm")
def test_explicit_list(self, target):
assert target == "llvm"
self.targets_with_explicit_list.append(target)
def test_no_repeats_in_explicit_list(self):
if tvm.testing.device_enabled("llvm"):
assert self.targets_with_explicit_list == ["llvm"]
else:
assert self.targets_with_explicit_list == []
targets_with_exclusion = []
@tvm.testing.exclude_targets("llvm")
def test_exclude_target(self, target):
assert "llvm" not in target
self.targets_with_exclusion.append(target)
def test_all_nonexcluded_targets_ran(self):
assert sorted(self.targets_with_exclusion) == sorted(
[target for target in self.enabled_targets if not target.startswith("llvm")]
)
run_targets_with_known_failure = []
@tvm.testing.known_failing_targets("llvm")
def test_known_failing_target(self, target):
self.run_targets_with_known_failure.append(target)
assert "llvm" not in target
def test_all_targets_ran(self):
assert sorted(self.run_targets_with_known_failure) == sorted(self.enabled_targets)
@tvm.testing.known_failing_targets("llvm")
@tvm.testing.parametrize_targets("llvm")
def test_known_failing_explicit_list(self, target):
assert target != "llvm"
class TestJointParameter:
param1_vals = [1, 2, 3]
param2_vals = ["a", "b", "c"]
independent_usages = 0
param1 = tvm.testing.parameter(*param1_vals)
param2 = tvm.testing.parameter(*param2_vals)
joint_usages = 0
joint_param_vals = list(zip(param1_vals, param2_vals))
joint_param_ids = ["apple", "pear", "banana"]
joint_param1, joint_param2 = tvm.testing.parameters(*joint_param_vals, ids=joint_param_ids)
def test_using_independent(self, param1, param2):
type(self).independent_usages += 1
def test_independent(self):
assert self.independent_usages == len(self.param1_vals) * len(self.param2_vals)
def test_using_joint(self, joint_param1, joint_param2):
type(self).joint_usages += 1
assert (joint_param1, joint_param2) in self.joint_param_vals
def test_joint(self):
assert self.joint_usages == len(self.joint_param_vals)
def test_joint_test_id(self, joint_param1, joint_param2, request):
param_string = (
request.node.name.replace(request.node.originalname, "")
.replace("[", "")
.replace("]", "")
)
assert param_string in self.joint_param_ids
class TestFixtureCaching:
param1_vals = [1, 2, 3]
param2_vals = ["a", "b", "c"]
param1 = tvm.testing.parameter(*param1_vals)
param2 = tvm.testing.parameter(*param2_vals)
uncached_calls = 0
cached_calls = 0
@tvm.testing.fixture
def uncached_fixture(self, param1):
type(self).uncached_calls += 1
return 2 * param1
def test_use_uncached(self, param1, param2, uncached_fixture):
assert 2 * param1 == uncached_fixture
def test_uncached_count(self):
assert self.uncached_calls == len(self.param1_vals) * len(self.param2_vals)
@tvm.testing.fixture(cache_return_value=True)
def cached_fixture(self, param1):
type(self).cached_calls += 1
return 3 * param1
def test_use_cached(self, param1, param2, cached_fixture):
assert 3 * param1 == cached_fixture
def test_cached_count(self):
cache_disabled = bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0")))
if cache_disabled:
assert self.cached_calls == len(self.param1_vals) * len(self.param2_vals)
else:
assert self.cached_calls == len(self.param1_vals)
class TestCachedFixtureIsCopy:
param = tvm.testing.parameter(1, 2, 3, 4)
@tvm.testing.fixture(cache_return_value=True)
def cached_mutable_fixture(self):
return {"val": 0}
def test_modifies_fixture(self, param, cached_mutable_fixture):
assert cached_mutable_fixture["val"] == 0
cached_mutable_fixture["val"] = param
class TestBrokenFixture:
num_uses_broken_uncached_fixture = 0
num_uses_broken_cached_fixture = 0
@tvm.testing.fixture
def broken_uncached_fixture(self):
raise RuntimeError("Intentionally broken fixture")
@pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True)
def test_uses_broken_uncached_fixture(self, broken_uncached_fixture):
type(self).num_uses_broken_uncached_fixture += 1
def test_num_uses_uncached(self):
assert self.num_uses_broken_uncached_fixture == 0
@tvm.testing.fixture(cache_return_value=True)
def broken_cached_fixture(self):
raise RuntimeError("Intentionally broken fixture")
@pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True)
def test_uses_broken_cached_fixture(self, broken_cached_fixture):
type(self).num_uses_broken_cached_fixture += 1
def test_num_uses_cached(self):
assert self.num_uses_broken_cached_fixture == 0
class TestAutomaticMarks:
@staticmethod
def check_marks(request, target):
decorators = tvm.testing.plugin._target_to_requirement(target)
required_marks = [decorator.mark for decorator in decorators]
applied_marks = list(request.node.iter_markers())
for required_mark in required_marks:
assert required_mark in applied_marks
def test_automatic_fixture(self, request, target):
self.check_marks(request, target)
@tvm.testing.parametrize_targets
def test_bare_parametrize(self, request, target):
self.check_marks(request, target)
@tvm.testing.parametrize_targets("llvm", "cuda", "vulkan")
def test_explicit_parametrize(self, request, target):
self.check_marks(request, target)
@pytest.mark.parametrize("target", ["llvm", "cuda", "vulkan"])
def test_pytest_mark(self, request, target):
self.check_marks(request, target)
@pytest.mark.parametrize("target,other_param", [("llvm", 0), ("cuda", 1), ("vulkan", 2)])
def test_pytest_mark_covariant(self, request, target, other_param):
self.check_marks(request, target)
@pytest.mark.skipif(
bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0"))),
reason="Cannot test cache behavior while caching is disabled",
)
class TestCacheableTypes:
class EmptyClass:
pass
@tvm.testing.fixture(cache_return_value=True)
def uncacheable_fixture(self):
return self.EmptyClass()
def test_uses_uncacheable(self, request):
with pytest.raises(TypeError):
request.getfixturevalue("uncacheable_fixture")
class ImplementsReduce:
def __reduce__(self):
return super().__reduce__()
@tvm.testing.fixture(cache_return_value=True)
def fixture_with_reduce(self):
return self.ImplementsReduce()
def test_uses_reduce(self, fixture_with_reduce):
pass
class ImplementsDeepcopy:
def __deepcopy__(self, memo):
return type(self)()
@tvm.testing.fixture(cache_return_value=True)
def fixture_with_deepcopy(self):
return self.ImplementsDeepcopy()
def test_uses_deepcopy(self, fixture_with_deepcopy):
pass
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm.ir import Range
from tvm.script import tir as T
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(32, 32):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
for ii, jj in T.grid(4, 4):
C[vi * 4 + ii, vj * 4 + jj] = T.float32(0)
for k in range(0, 32):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
for ii, jj, kk in T.grid(4, 4, 4):
C[vi * 4 + ii, vj * 4 + jj] = (
C[vi * 4 + ii, vj * 4 + jj]
+ A[vi * 4 + ii, vk * 4 + kk] * B[vj * 4 + jj, vk * 4 + kk]
)
@T.prim_func
def elementwise_with_root(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
with T.block():
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def func_with_opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
with T.block():
with T.block():
B[0, 0] = A[0, 0] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
with T.block():
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
B[vi, vj] = A[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
def test_complete_matmul():
func = matmul
A, B, C = [func.buffer_map[x] for x in func.params]
block = func.body.block.body.body.body.body.block
assert isinstance(block, tvm.tir.Block)
vi, vj, vk = [x.var for x in block.iter_vars]
access_A = tvm.tir.BufferRegion(A, [Range.from_min_extent(vi, 1), Range.from_min_extent(vk, 1)])
access_B = tvm.tir.BufferRegion(B, [Range.from_min_extent(vj, 1), Range.from_min_extent(vk, 1)])
access_C = tvm.tir.BufferRegion(C, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)])
tvm.ir.assert_structural_equal(block.reads, [access_A, access_B])
tvm.ir.assert_structural_equal(block.writes, [access_C])
def test_complete_matmul_original():
func = matmul_original
A, B, C = [func.buffer_map[x] for x in func.params]
block1 = func.body.block.body.body.body[0].block
assert isinstance(block1, tvm.tir.Block)
vi, vj = [x.var for x in block1.iter_vars]
access_C = tvm.tir.BufferRegion(
C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
)
tvm.ir.assert_structural_equal(block1.reads, [])
tvm.ir.assert_structural_equal(block1.writes, [access_C])
block2 = func.body.block.body.body.body[1].body.block
assert isinstance(block2, tvm.tir.Block)
vi, vj, vk = [x.var for x in block2.iter_vars]
access_A = tvm.tir.BufferRegion(
A, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vk * 4, 4)]
)
access_B = tvm.tir.BufferRegion(
B, [Range.from_min_extent(vj * 4, 4), Range.from_min_extent(vk * 4, 4)]
)
access_C = tvm.tir.BufferRegion(
C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
)
tvm.ir.assert_structural_equal(block2.reads, [access_C, access_A, access_B])
tvm.ir.assert_structural_equal(block2.writes, [access_C])
def _check_elementwise(func):
A, B, C = [func.buffer_map[x] for x in func.params]
block1 = func.body.block.body[0].body.body.block
assert isinstance(block1, tvm.tir.Block)
vi, vj = [x.var for x in block1.iter_vars]
tvm.ir.assert_structural_equal(
block1.reads,
[tvm.tir.BufferRegion(A, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)])],
)
tvm.ir.assert_structural_equal(
block1.writes,
[tvm.tir.BufferRegion(B, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)])],
)
block2 = func.body.block.body[1].body.body.block
assert isinstance(block2, tvm.tir.Block)
vi, vj = [x.var for x in block2.iter_vars]
tvm.ir.assert_structural_equal(
block2.reads,
[tvm.tir.BufferRegion(B, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)])],
)
tvm.ir.assert_structural_equal(
block2.writes,
[tvm.tir.BufferRegion(C, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)])],
)
def test_complete_with_root():
_check_elementwise(elementwise_with_root)
def test_complete_part_region():
_check_elementwise(func_with_part_access_region)
@T.prim_func
def func_with_bufferslice_indices(data: T.handle, index: T.handle) -> None:
data_buf = T.match_buffer(data, (16, 16), "float32")
index_buf = T.match_buffer(index, (1,), "int32")
out_buf = T.alloc_buffer((16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
out_buf[vi, vj] = data_buf[vi, index_buf[0]]
@T.prim_func
def expected_bufferslice_indices(data: T.handle, index: T.handle) -> None:
index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
for i0, i1 in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i0, i1])
T.reads([data_buf[vi, index_buf[0]], index_buf[0]])
T.writes([out_buf[vi, vj]])
out_buf[vi, vj] = data_buf[vi, index_buf[0]]
@T.prim_func
def func_with_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
data_buf = T.match_buffer(data, (16, 16), "float32")
index_buf = T.match_buffer(index, (1,), "int32")
out_buf = T.alloc_buffer((16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
@T.prim_func
def expected_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
for i0, i1 in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i0, i1])
T.reads(
[
data_buf[index_buf[index_buf[0]], index_buf[0]],
index_buf[T.min(index_buf[0], 0) : T.max(index_buf[0], 0) + 1],
]
)
T.writes([out_buf[vi, vj]])
out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
def test_complete_buffer_indices():
new_func = tvm.script.from_source(func_with_bufferslice_indices.script())
tvm.ir.assert_structural_equal(new_func, expected_bufferslice_indices)
new_func = tvm.script.from_source(func_with_recursive_bufferslice_indices.script())
tvm.ir.assert_structural_equal(new_func, expected_recursive_bufferslice_indices)
@T.prim_func
def match_buffer_func(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16))
for i in range(0, 16):
with T.block():
A0 = T.match_buffer(A[i, 0:16], (16))
with T.block():
for j in range(0, 16):
with T.block():
A1 = T.match_buffer(A0[j], ())
A1[()] = 1.0
@T.prim_func
def expected_match_buffer_func(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16))
for i in range(0, 16):
with T.block():
T.reads([])
T.writes(A[i, 0:16])
A0 = T.match_buffer(A[i, 0:16], (16))
with T.block():
T.reads([])
T.writes(A0[0:16])
for j in range(0, 16):
with T.block():
T.reads([])
T.writes(A0[j])
A1 = T.match_buffer(A0[j], ())
A1[()] = 1.0
def test_complete_match_buffer():
tvm.ir.assert_structural_equal(match_buffer_func, expected_match_buffer_func)
@T.prim_func
def alloc_buffer_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [2, 2], dtype="float32")
B = T.match_buffer(b, [2, 2], dtype="float32")
C = T.alloc_buffer([2, 2], dtype="float32")
A[(0, 0)] = T.float32(2)
C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
B[(0, 0)] = C[(0, 0)]
@T.prim_func
def expect_alloc_buffer_func(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
B = T.match_buffer(b, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
C = T.alloc_buffer([2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
A[(0, 0)] = T.float32(2)
C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
B[(0, 0)] = C[(0, 0)]
def test_complete_alloc_buffer():
rt_func = tvm.script.from_source(alloc_buffer_func.script(show_meta=True))
tvm.ir.assert_structural_equal(alloc_buffer_func, expect_alloc_buffer_func)
if __name__ == "__main__":
test_complete_matmul()
test_complete_matmul_original()
test_complete_with_root()
test_complete_part_region()
test_complete_buffer_indices()
test_complete_match_buffer()
test_complete_alloc_buffer()
import inspect
import re
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir.diagnostics import override_renderer
from tvm.script import from_source
from tvm.script import tir as T
def check_error(func, rel_lineno):
check_error_re = re.compile(r"^.*# check_error: (.*)$")
"""check if TIR script throws error"""
errors = []
def render(e):
for d in e.diagnostics:
errors.append(d)
override_renderer(render)
try:
source_code = inspect.getsource(func)
indent = len(re.match(r"^\s*", source_code).group(0))
source_code = "@T.prim_func\n" + "\n".join(
line[indent:] for line in source_code.splitlines()
)
from_source(source_code)
except tvm.error.DiagnosticError as e:
pass
assert len(errors) == 1, errors
if rel_lineno is None:
return
error = errors[0]
assert (
error.span.line - 1 == rel_lineno or error.span.line == rel_lineno
), f"Expected error to be on line {rel_lineno}, but it was on {error.span.line - 1}"
error_line = source_code.split("\n")[rel_lineno]
m = check_error_re.match(error_line)
if m:
expected_error_text = m.group(1)
error = error.message
assert (
expected_error_text == error
), f'check_error expects "{expected_error_text}" in str(errors): {error}'
def test_buffer_bind():
def buffer_bind_missing_args(a: T.handle) -> None:
A = T.match_buffer((16, 16), "float32")
check_error(buffer_bind_missing_args, 2)
def test_undefined_buffer():
def undefined_buffer(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
T.attr(A, "realize_scope", "")
T.realize(C[0:16, 0:16], "")
for i in T.serial(16):
for j in T.serial(0, 16):
A[i, j] = 0.0
check_error(undefined_buffer, 5)
def test_unsupported_stmt():
def unsupported_stmt(a: T.int32) -> None:
if a > 0:
print("I love tvm")
check_error(unsupported_stmt, 3)
def test_unsupported_function_call():
def unsupported_function_call(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
T.attr(A, "realize_scope", "")
T.realize(A[0:16, 0:16], "")
for i in T.const_range(16):
for j in T.serial(0, 16):
A[i, j] = 0.0
check_error(unsupported_function_call, 6)
def test_missing_type_annotation():
def missing_type_annotation(a) -> None:
T.evaluate(0.0)
check_error(missing_type_annotation, 1)
def test_invalid_for_function():
def invalid_for_function(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
for i in T.evaluate(0.0):
for j in T.serial(0, 16):
A[i, j] = 0.0
check_error(invalid_for_function, 4)
def test_invalid_block_function():
def invalid_block_function(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
with T.evaluate(0.0):
T.evaluate(1.0)
check_error(invalid_block_function, 4)
def test_return_not_allowed():
def return_not_allowed(a: T.handle) -> None:
return T.evaluate(0)
check_error(return_not_allowed, 2)
def test_no_body():
def no_body(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
T.realize(A, "")
check_error(no_body, 3)
def test_allocate_with_buffers():
def allocate_with_buffers() -> None:
with T.allocate([1], "float32", "") as [A, B]:
T.evaluate(1.0)
check_error(allocate_with_buffers, 2)
def test_inconsistent_binding():
def inconsistent_binding_value() -> None:
for i, j in T.grid(16, 16):
vi, vj = T.axis.remap("SS", [i])
T.evaluate(1.0)
def inconsistent_binding_type() -> None:
for i, j in T.grid(16, 16):
vi, vj = T.axis.remap("S", [i, j])
T.evaluate(1.0)
check_error(inconsistent_binding_value, 3)
check_error(inconsistent_binding_type, 3)
def test_error_remap_args():
def error_remap_type() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("TT", [i, j])
T.evaluate(1.0)
def error_remap_value() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i + j, j])
T.evaluate(1.0)
check_error(error_remap_type, 4)
check_error(error_remap_value, 4)
def test_invalid_block_axes():
def invalid_block_axes(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
vi = T.axis.S(i, A)
T.evaluate(1.0)
check_error(invalid_block_axes, 5)
def test_duplicate_block_axes():
def duplicate_block_axes() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi = T.axis.S(16, i)
vi = T.axis.S(16, j)
T.evaluate(1.0)
def duplicate_block_axes_remap() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vi = T.axis.remap("SS", [i, j])
T.evaluate(1.0)
check_error(duplicate_block_axes, 5)
check_error(duplicate_block_axes_remap, 4)
def test_miss_block_bind():
def miss_block_bind_value() -> None:
for i, j in T.grid(128, 128):
with T.block():
vi = T.axis.S(i)
T.evaluate(1.0)
check_error(miss_block_bind_value, 4)
def test_invalid_loop_var():
def invalid_loop_var() -> None:
for i, j in range(0, 16):
T.evaluate(1.0)
check_error(invalid_loop_var, 2)
def test_inconsistent_grid():
def inconsistent_grid() -> None:
for i in T.grid(16, 16):
T.evaluate(1.0)
check_error(inconsistent_grid, 2)
def test_invalid_match_buffer_region():
def invalid_match_buffer_region() -> None:
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
A = T.match_buffer(vi)
T.evaluate(1.0)
check_error(invalid_match_buffer_region, 5)
def test_duplicate_buffer():
def duplicate_buffer() -> None:
A = T.alloc_buffer((128, 128), "float32")
A = T.alloc_buffer((128, 128), "float32")
check_error(duplicate_buffer, 3)
def test_duplicate_block_signature():
def duplicate_reads() -> None:
A = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[0:8, 0:8])
T.reads(A[0:16, 0:16])
T.evaluate(1.0)
def duplicate_writes() -> None:
A = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.writes(A[0:8, 0:8])
T.writes(A[0:16, 0:16])
T.evaluate(1.0)
def duplicate_predicate() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.where(1)
T.where(0)
def duplicate_annotations() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
T.block_attr({})
T.block_attr({})
def duplicate_init() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
with T.init():
T.evaluate(1.0)
with T.init():
T.evaluate(1.0)
def duplicate_axes() -> None:
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
vi = T.axis.S(i, 16)
T.evaluate(1.0)
check_error(duplicate_reads, 7)
check_error(duplicate_writes, 7)
check_error(duplicate_predicate, 6)
check_error(duplicate_annotations, 6)
check_error(duplicate_init, 7)
check_error(duplicate_axes, 5)
def test_opaque_access_during_complete():
def opaque_access_during_complete(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
T.evaluate(T.call_extern("dummy_extern_function", A.data, dtype="int32"))
check_error(opaque_access_during_complete, None)
def test_convert_slice_to_bufferload():
def convert_slice_to_bufferload() -> None:
A = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = A[vi : vi + 2, vj] + 1
check_error(convert_slice_to_bufferload, 6)
def test_tvm_exception_catch():
def special_stmt_except() -> None:
A = T.alloc_buffer("(128, 128)", "float32")
T.evaluate(1.0)
def scope_handler_except() -> None:
for i in T.serial("1", "1"):
T.evaluate(1)
def intrin_except_unassign(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
T.evaluate(A)
def intrin_except_assign(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
A[0, 0] = A[A]
check_error(special_stmt_except, 2)
check_error(scope_handler_except, 2)
check_error(intrin_except_unassign, 3)
check_error(intrin_except_assign, 3)
def test_match_buffer_shape_mismatch():
def buffer_shape_mismatch(a: T.handle) -> None:
A = T.match_buffer(a, (8, 8))
for i, j in T.grid(8, 2):
with T.block():
T.reads([])
T.writes([A[i, j * 4 : j * 4 + 4]])
sub_A = T.match_buffer(
A[i, j * 4 : j * 4 + 4], (5)
)
for jj in range(0, 4):
sub_A[i, j * 4 + jj] = 1
check_error(buffer_shape_mismatch, 7)
def test_high_dim_store():
def high_dim_store() -> None:
with T.block("root"):
B = T.allocate([256], "float32", "global")
for i, j in T.grid(16, 16):
B[i, j] = 1.0
check_error(high_dim_store, 5)
def test_block_has_option_vars():
def block_has_option_vars() -> None:
with T.block("root") as x:
T.evaluate(0.0)
check_error(block_has_option_vars, 2)
def test_implicit_root_has_attrs():
def implicit_root_has_read():
T.reads([])
T.evaluate(0.0)
def implicit_root_has_write():
T.writes([])
T.evaluate(0.0)
def implicit_root_has_attrs():
T.block_attr({})
T.evaluate(0.0)
def implicit_root_has_predicate():
T.where(True)
T.evaluate(0.0)
def implicit_root_has_axes():
v = T.axis.S(0, 0)
T.evaluate(0.0)
check_error(implicit_root_has_read, 2)
check_error(implicit_root_has_write, 2)
check_error(implicit_root_has_attrs, 2)
check_error(implicit_root_has_predicate, 2)
check_error(implicit_root_has_axes, 2)
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i, j, k, l in T.grid(128, 128, 128, 8):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
vl = T.axis.S(128, l * 16)
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128))
C = T.alloc_buffer((128, 128, 128))
B = T.match_buffer(b, (128, 128, 128))
for i, j in T.grid(128, 128):
for k in T.serial(0, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = A[vi, vj, vk] * 2.0
for k in T.serial(0, 128):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
B[vi, vj, vk] = C[vi, vj, vk] * 2.0
def test_reorder_fail_block():
sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
block_b = sch.get_block("B")
i, j, k, l = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError) as execinfo:
sch.reorder(l, i)
expected_sub_error_message = (
"
' with T.block("B"):\n'
" ^^^^^^^^^^^^^^^^^^\n"
)
assert expected_sub_error_message in str(execinfo.value)
def test_reorder_fail_nested_loop_inner():
sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError) as execinfo:
sch.reorder(k, i)
expected_sub_error_message = (
" for i in T.serial(128):\n"
"
" for j in T.serial(128):\n"
" ^^^^^^^^^^^^^^^^^^^^^^^\n"
)
assert expected_sub_error_message in str(execinfo.value)
def test_fuse_fail_nested_loop_outer():
sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
block_b = sch.get_block("B")
i, j, k = sch.get_loops(block_b)
with pytest.raises(tvm.tir.ScheduleError) as execinfo:
sch.fuse(k, i)
expected_sub_error_message = (
"
" for i in T.serial(128):\n"
" ^^^^^^^^^^^^^^^^^^^^^^^\n"
" for j in T.serial(128):\n"
)
assert expected_sub_error_message in str(execinfo.value)
def test_report_error_root_block():
sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
root = sch.get_block("root")
with pytest.raises(tvm.tir.ScheduleError) as execinfo:
sch.compute_inline(root)
expected_sub_error_message = (
"
' with T.block("root"):\n'
" ^^^^^^^^^^^^^^^^^^^^^\n"
)
assert expected_sub_error_message in str(execinfo.value)
def test_load_var():
def load_var_multiple() -> None:
d = T.var("float32")
d[2] = d[2, 1]
check_error(load_var_multiple, 3)
def test_store_var():
def store_var_multiple() -> None:
d = T.var("float32")
d[2, 1] = d[1]
check_error(store_var_multiple, 3)
def test_load_handle():
def load_handle(h: T.handle) -> None:
h_ = T.match_buffer(h, [1])
h_[0] = h[0]
check_error(load_handle, 3)
def test_store_handle():
def store_handle(h: T.handle) -> None:
h_ = T.match_buffer(h, [1])
h[0] = h_[0]
check_error(store_handle, 3)
def test_binop_bad_ast_type():
def binop_bad_ast_type(h: T.handle):
h_ = T.match_buffer(h, [1])
h_[0] = h + [2]
check_error(binop_bad_ast_type, 3)
def test_binop_bad_type():
def binop_bad_type(h: T.handle):
h_ = T.match_buffer(h, [1])
h_[0] = h + 2
check_error(binop_bad_type, 3)
def test_non_integer_typed_block_iter():
def non_integer_typed_block_iter():
with T.block():
i = T.axis.S(0.1, 0.1)
check_error(non_integer_typed_block_iter, 3)
def test_preflattened_buffer_map_align():
def preflattened_buffer_map_align_nonint(foo: T.handle):
foo_1 = T.match_buffer(foo, [1])
T.preflattened_buffer(
foo_1, [1], align="bar"
)
check_error(preflattened_buffer_map_align_nonint, 3)
def test_preflattened_buffer_map_offset_factor():
def preflattened_buffer_map_offset_factor_nonint(foo: T.handle):
foo_1 = T.match_buffer(foo, [1])
T.preflattened_buffer(
foo_1, [1], offset_factor="bar"
)
check_error(preflattened_buffer_map_offset_factor_nonint, 3)
def test_illegal_buffer_slice():
def strided_buffer_region(A: T.handle):
A = T.match_buffer((128, 128), "int32")
with T.block():
T.reads([])
T.writes([A[0:128:2, 0:128:3]])
T.evaluate(T.call_extern("strided_compute", dtype=""))
def access_reversed_slice(A: T.handle):
A = T.match_buffer((128,), "int32")
A[0:128:-1] = T.broadcast(1, 128)
def access_non_const_slice_length(A: T.handle):
A = T.match_buffer((128,), "int32")
for i in range(4):
T.evaluate(A[0:i:1])
check_error(strided_buffer_region, 3)
check_error(access_reversed_slice, 3)
check_error(access_non_const_slice_length, 3)
def test_syntax_sugar_fail():
def loop_syntax_sugar_fail(a: T.handle) -> None:
A = T.match_buffer(a, (128,))
for i in T.thread_binding(128, 128):
A[i] = A[i] * 2.0
check_error(loop_syntax_sugar_fail, 3)
if __name__ == "__main__":
tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.base"""
import pytest
from tvm.script.ir_builder import IRBuilder
def test_ir_builder_scope():
with IRBuilder() as ib: # pylint: disable=invalid-name
assert IRBuilder.current() == ib
def test_ir_builder_multi_scope():
with IRBuilder() as ib: # pylint: disable=invalid-name
with IRBuilder() as ib2: # pylint: disable=invalid-name
assert IRBuilder.current() == ib2
assert IRBuilder.current() == ib
def test_ir_builder_no_scope():
with pytest.raises(ValueError):
IRBuilder.current()
if __name__ == "__main__":
test_ir_builder_scope()
test_ir_builder_multi_scope()
test_ir_builder_no_scope()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.ir"""
import pytest
import tvm.testing
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import ir as I
from tvm import ir
from tvm.ir.base import assert_structural_equal
def test_ir_builder_irmodule():
with IRBuilder() as ib: # pylint: disable=invalid-name
with I.ir_module():
pass
# the ir_module generated by IRBuilder
ir_module_actual = ib.get()
# the expected IRModule
ir_module_expected = ir.IRModule(None, None)
assert_structural_equal(ir_module_actual, ir_module_expected, map_free_vars=True)
if __name__ == "__main__":
tvm.testing.main()
"""Unittests for tvm.script.ir_builder.tir""" |
import numpy as np |
import pytest |
import tvm |
import tvm.testing
from tvm |
import tir
from tvm.ir.base |
import assert_structural_equal
from tvm.runtime |
import ndarray
from tvm.script.ir_builder |
import IRBuilder
from tvm.script.ir_builder |
import tir as T
def test_ir_builder_tir_primfunc_base():
with IRBuilder() as ib:
with T.prim_func():
T.evaluate(0)
prim_func_actual = ib.get()
prim_func_expected = tir.PrimFunc(
params=[],
body=tir.Evaluate(0),
ret_type=None,
buffer_map=None,
preflattened_buffer_map=None,
attrs=None,
)
assert_structural_equal(prim_func_actual, prim_func_expected, map_free_vars=True)
def test_ir_builder_tir_primfunc_complete():
with IRBuilder() as ib:
with T.prim_func():
T.arg("a", T.handle())
T.arg("b", T.var("int64"))
T.arg("c", T.buffer_decl((128, 128), "float32"))
d = T.arg("d", T.handle())
e = T.arg("e", T.buffer_decl((1024,), "int8"))
T.func_attr({"key": "value"})
T.func_ret(tvm.ir.PrimType("int64"))
buffer_d = T.match_buffer(d, (64, 64), "int64")
T.preflattened_buffer(e, (32, 32), "int8", data=e.data)
T.evaluate(0)
prim_func_actual = ib.get()
c_handle, c_buffer = tir.Var("c_handle", "handle"), tir.decl_buffer(
(128, 128), "float32", name="c"
)
d_handle, d_buffer = tir.Var("d", "handle"), tir.decl_buffer((64, 64), "int64", name="d")
e_handle, e_buffer = tir.Var("e_handle", "handle"), tir.decl_buffer((1024,), "int8", name="e")
prim_func_expected = tir.PrimFunc(
params=[
tir.Var("a", "handle"),
tir.Var("b", "int64"),
c_handle,
d_handle,
e_handle,
],
body=tir.Evaluate(0),
ret_type=tvm.ir.PrimType("int64"),
buffer_map={c_handle: c_buffer, d_handle: d_buffer, e_handle: e_buffer},
preflattened_buffer_map={
e_handle: tir.decl_buffer((32, 32), "int8", name="e_preflatten", data=e_buffer.data)
},
attrs=tvm.ir.make_node("DictAttrs", key="value"),
)
assert_structural_equal(prim_func_actual, prim_func_expected, map_free_vars=True)
def test_ir_builder_tir_block_base():
with IRBuilder() as ib:
with T.block("block"):
T.evaluate(0)
block_realize_actual = ib.get()
block_expected = tir.Block(
iter_vars=[],
reads=[],
writes=[],
name_hint="block",
body=tir.Evaluate(0),
alloc_buffers=None,
match_buffers=None,
annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)},
)
block_realize_expected = tir.BlockRealize(
iter_values=[],
predicate=True,
block=block_expected,
)
assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_block_complete():
with IRBuilder() as ib:
a = T.var("int64", "a")
b = T.buffer_decl((128, 128), "float32")
c = T.buffer_decl((128, 128), "float32")
d = T.var("int32", "d")
e = T.buffer_decl((128, 128), "float32")
f = T.var("int32", "f")
with T.block("block"):
T.where(a > 1)
T.reads(b[0:16, 0:16])
T.writes(c[d:128, d:128])
T.block_attr({"key": "value"})
T.alloc_buffer((128, 128), "float32")
T.match_buffer(e[0:32, 0:32], (32, 32), "float32")
T.axis.spatial(128, f)
T.evaluate(0)
block_realize_actual = ib.get()
var_a = tir.Var("a", "int64")
buffer_b = tir.decl_buffer((128, 128), "float32", name="b")
buffer_c = tir.decl_buffer((128, 128), "float32", name="c")
var_d = tir.Var("d", "int32")
buffer_e = tir.decl_buffer((128, 128), "float32", name="c")
var_f = tir.Var("f", "int32")
block_expected = tir.Block(
iter_vars=[tir.IterVar((0, 128), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar)],
reads=[buffer_b[0:16, 0:16]],
writes=[buffer_c[var_d:128, var_d:128]],
name_hint="block",
body=tir.Evaluate(0),
alloc_buffers=[tir.decl_buffer((128, 128), "float32")],
match_buffers=[
tir.MatchBufferRegion(tir.decl_buffer((32, 32), "float32"), buffer_e[0:32, 0:32])
],
annotations={"key": "value"},
)
block_realize_expected = tir.BlockRealize(
iter_values=[var_f],
predicate=var_a > 1,
block=block_expected,
)
assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_axis():
with IRBuilder() as ib:
a = T.var("int32", "a")
b = T.var("int32", "b")
c = T.var("int32", "c")
d = T.var("int32", "d")
with T.block("block"):
T.axis.spatial(8, a)
T.axis.reduce(16, b)
T.axis.scan(32, c)
T.axis.opaque(64, d)
T.evaluate(0)
block_realize_actual = ib.get()
var_a = tir.Var("a", "int32")
var_b = tir.Var("b", "int32")
var_c = tir.Var("c", "int32")
var_d = tir.Var("d", "int32")
block_expected = tir.Block(
iter_vars=[
tir.IterVar((0, 8), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar),
tir.IterVar((0, 16), tir.Var("", "int32"), iter_type=tir.IterVar.CommReduce),
tir.IterVar((0, 32), tir.Var("", "int32"), iter_type=tir.IterVar.Ordered),
tir.IterVar((0, 64), tir.Var("", "int32"), iter_type=tir.IterVar.DimInfo),
],
reads=[],
writes=[],
name_hint="block",
body=tir.Evaluate(0),
annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)},
)
block_realize_expected = tir.BlockRealize(
iter_values=[var_a, var_b, var_c, var_d],
predicate=True,
block=block_expected,
)
assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True)
def test_ir_builder_tir_for():
with IRBuilder() as ib:
with T.serial(128) as a:
with T.parallel(64) as b:
with T.vectorized(32) as c:
with T.unroll(16) as d:
with T.thread_binding(8, thread="threadIdx.x") as e:
T.evaluate(0)
for_actual = ib.get()
thread_binding_expected = tir.For(
loop_var=tir.Var("", "int32"),
min_val=0,
extent=8,
kind=tir.ForKind.THREAD_BINDING,
body=tir.Evaluate(0),
thread_binding=tir.IterVar(
None, tir.Var("", "int32"), tir.IterVar.ThreadIndex, "threadIdx.x"
),
)
unroll_expected = tir.For(
loop_var=tir.Var("", "int32"),
min_val=0,
extent=16,
kind=tir.ForKind.UNROLLED,
body=thread_binding_expected,
)
vectorized_expected = tir.For(
loop_var=tir.Var("", "int32"),
min_val=0,
extent=32,
kind=tir.ForKind.VECTORIZED,
body=unroll_expected,
)
parallel_expected = tir.For(
loop_var=tir.Var("", "int32"),
min_val=0,
extent=64,
kind=tir.ForKind.PARALLEL,
body=vectorized_expected,
)
for_expected = tir.For(
loop_var=tir.Var("", "int32"),
min_val=0,
extent=128,
kind=tir.ForKind.SERIAL,
body=parallel_expected,
)
assert_structural_equal(for_actual, for_expected, map_free_vars=True)
def test_ir_builder_tir_assert():
with IRBuilder() as ib:
with T.Assert(T.var("int32", name="a") == 0, message="a is 0"):
T.evaluate(0)
assert_actual = ib.get()
assert_expected = tir.AssertStmt(
T.var("int32", name="a") == 0, tir.StringImm("a is 0"), tir.Evaluate(0)
)
assert_structural_equal(assert_actual, assert_expected, map_free_vars=True)
def test_ir_builder_tir_let():
with IRBuilder() as ib:
with T.let(T.var("int32", name="a"), tir.IntImm("int32", 2)):
T.evaluate(0)
let_actual = ib.get()
let_expected = tir.LetStmt(T.var("int32", name="a"), tir.IntImm("int32", 2), tir.Evaluate(0))
assert_structural_equal(let_actual, let_expected, map_free_vars=True)
def test_ir_builder_tir_realize():
buffer_a = T.buffer_decl((128, 128), "float32")
with IRBuilder() as ib:
with T.realize(buffer_a[0:128, 0:128], "test_storage_scope", True):
T.evaluate(0)
realize_actual = ib.get()
buffer_realize = tir.BufferRealize(
buffer_a, [tvm.ir.Range(0, 128), tvm.ir.Range(0, 128)], True, tir.Evaluate(0)
)
expected_realize = tir.AttrStmt(
buffer_a, "realize_scope", tir.StringImm("test_storage_scope"), buffer_realize
)
assert_structural_equal(realize_actual, expected_realize, map_free_vars=True)
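# T.launch_thread over an env_thread should become an AttrStmt("thread_extent") whose node
# is an IterVar tagged "blockIdx.y".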
def test_ir_builder_tir_thread():
with IRBuilder() as ib:
with T.prim_func():
brow = T.env_thread("blockIdx.y")
with T.launch_thread(brow, 1):
T.evaluate(0)
ir_actual = ib.get()
iter_var = tir.IterVar((0, 1), "v", iter_type=1, thread_tag="blockIdx.y")
attr_stmt = tir.AttrStmt(iter_var, "thread_extent", 1, tir.Evaluate(0))
func = tir.PrimFunc([], attr_stmt)
assert_structural_equal(ir_actual, func, map_free_vars=True)
def test_ir_builder_tir_allocate():
with IRBuilder() as ib:
with T.allocate([10], "float32", scope="local"):
T.evaluate(1)
ir_actual = ib.get()
buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("float32"), "local"))
ir_expected = tir.Allocate(
buffer_var, "float32", [10], tvm.tir.const(1, "uint1"), tir.Evaluate(1)
)
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_allocate_const():
data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
with IRBuilder() as ib:
with T.allocate_const(data, "int32", [10]):
T.evaluate(1)
ir_actual = ib.get()
buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("int32")))
ir_expected = tir.AllocateConst(
buffer_var,
"int32",
[10],
        ndarray.array(np.asarray(data, "int32")),
tir.Evaluate(1),
annotations={},
)
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_while():
with IRBuilder() as ib:
with T.While(T.var("int32", "x") > 0):
T.evaluate(0)
ir_actual = ib.get()
ir_expected = tir.While(tir.Var("x", "int32") > 0, tir.Evaluate(0))
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_if_then_else():
with IRBuilder() as ib:
with T.If(T.var("int32", "c") < 12):
with T.Then():
T.evaluate(T.int32(0))
with T.Else():
T.evaluate(T.int32(1))
ir_actual = ib.get()
ir_expected = tir.IfThenElse(
tir.Var("c", "int32") < 12,
tir.Evaluate(tir.IntImm("int32", 0)),
tir.Evaluate(tir.IntImm("int32", 1)),
)
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_buffer_store():
buffer_a = T.buffer_decl((10, 10), "float32")
i = T.var("int32", "x")
with IRBuilder() as ib:
T.buffer_store(buffer_a, 0.1, [0, i])
ir_actual = ib.get()
ir_expected = tir.BufferStore(buffer_a, 0.1, [0, i])
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_prefetch():
with IRBuilder() as ib:
buffer_a = T.buffer_decl((128, 128), "float32")
T.prefetch(buffer_a, [])
ir_actual = ib.get()
ir_expected = tir.Prefetch(buffer_a, [])
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
def test_ir_builder_tir_evaluate():
with IRBuilder() as ib:
T.evaluate(0)
eval_actual = ib.get()
eval_expected = tir.Evaluate(0)
assert_structural_equal(eval_actual, eval_expected, map_free_vars=True)
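# T.decl_buffer with no explicit data pointer is expected to allocate backing storage and
# wrap the body in a DeclBuffer node.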
def test_ir_builder_tir_decl_buffer():
with IRBuilder() as ib:
        with T.decl_buffer([128, 128], "float32"):
T.evaluate(0)
ir_actual = ib.get()
buffer = T.buffer_decl((128, 128), "float32")
ir_expected = tir.Allocate(
buffer.data,
"float32",
(128, 128),
tir.IntImm("bool", True),
tir.DeclBuffer(buffer, tir.Evaluate(0)),
)
assert_structural_equal(ir_actual, ir_expected, map_free_vars=True)
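# T.meta_var wraps plain Python values; accessing .value inlines them, so the whole
# expression folds to the constant Evaluate(10).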
def test_ir_builder_tir_inline():
with IRBuilder() as ib:
m, n = T.meta_var(1), T.meta_var(2)
a, b = T.meta_var([3, 4])
T.evaluate(m.value + n.value + a.value + b.value)
eval_actual = ib.get()
eval_expected = tir.Evaluate(10)
assert_structural_equal(eval_actual, eval_expected, map_free_vars=True)
if __name__ == "__main__":
tvm.testing.main() |
import tvm
from tvm.script import tir as T
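# Meta-programming: M, N, K and dtype are ordinary Python values captured when the inner
# @T.prim_func is parsed, so each call produces a PrimFunc specialized to those shapes.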
def matmul_generator(M: int, N: int, K: int, dtype: str):
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [M, K], dtype=dtype)
B = T.match_buffer(b, [N, K], dtype=dtype)
C = T.match_buffer(c, [M, N], dtype=dtype)
for i, j, k in T.grid(M, N, K):
with T.block():
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
return matmul
@T.prim_func
def matmul_128_128_128_fp16(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float16")
B = T.match_buffer(b, [128, 128], dtype="float16")
C = T.match_buffer(c, [128, 128], dtype="float16")
for i, j, k in T.grid(128, 128, 128):
with T.block():
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_meta_programming_matmul():
f = matmul_generator(128, 128, 128, "float16")
tvm.ir.assert_structural_equal(f, matmul_128_128_128_fp16)
if __name__ == "__main__":
test_meta_programming_matmul() |
import tvm
from tvm.script import tir as T
import numpy as np |
import tvm.testing
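# TIR implementation of get_valid_counts: boxes whose score passes the threshold are
# compacted to the front of `out` with their original indices recorded in `out_indices`;
# the remaining slots are filled with -1.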
@T.prim_func
def get_valid_counts(
data: T.handle,
valid_count: T.handle,
out: T.handle,
out_indices: T.handle,
score_threshold: T.float32,
id_index: T.int32,
score_index: T.int32,
) -> None:
data_buf = T.match_buffer(data, (1, 2500, 6), "float32")
valid_count_buf = T.match_buffer(valid_count, (1,), "int32")
out_buf = T.match_buffer(out, (1, 2500, 6), "float32")
out_indices_buf = T.match_buffer(out_indices, (1, 2500), "int32")
with T.block("init"):
vi = T.axis.S(1, 0)
valid_count_buf[vi] = T.int32(0)
for j in range(2500):
with T.block("update"):
vj = T.axis.S(2500, j)
T.reads([data_buf[vi, vj, 6]])
T.writes([valid_count_buf[vi], out_indices_buf[vi, vj], out_buf[vi, vj, 6]])
if (data_buf[vi, vj, score_index] > score_threshold) and (
(id_index < 0) or (data_buf[vi, vj, id_index] >= T.float32(0))
):
for k in T.serial(0, 6):
out_buf[vi, valid_count_buf[vi], k] = data_buf[vi, vj, k]
out_indices_buf[vi, valid_count_buf[vi]] = vj
valid_count_buf[vi] = valid_count_buf[vi] + 1
if vj >= valid_count_buf[vi]:
for k in T.serial(0, 6):
out_buf[vi, vj, k] = T.float32(-1)
out_indices_buf[vi, vj] = T.int32(-1)
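# NumPy reference implementation mirroring the prim_func above, used to validate the
# compiled kernel.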
def _check_get_valid_counts_with_numpy(f, dshape, score_threshold, id_index, score_index):
dtype = "float32"
ctx = tvm.cpu()
batch_size, num_anchor, elem_length = dshape
np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
np_out1 = np.zeros(shape=(batch_size,), dtype="int32")
np_out2 = np.zeros(shape=dshape).astype(dtype)
np_out3 = np.zeros(shape=(batch_size, num_anchor), dtype="int32")
for i in range(batch_size):
np_out1[i] = 0
inter_idx = 0
for j in range(num_anchor):
            score = np_data[i, j, score_index]
if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
for k in range(elem_length):
np_out2[i, inter_idx, k] = np_data[i, j, k]
np_out1[i] += 1
np_out3[i, inter_idx] = j
inter_idx += 1
if j >= np_out1[i]:
for k in range(elem_length):
np_out2[i, j, k] = -1.0
np_out3[i, j] = -1
in_data = tvm.nd.array(np_data, ctx)
out1 = tvm.nd.array(np_out1, ctx)
out2 = tvm.nd.array(np_out2, ctx)
out3 = tvm.nd.array(np_out3, ctx)
f(in_data, out1, out2, out3, score_threshold, id_index, score_index)
tvm.testing.assert_allclose(out1.numpy(), np_out1, rtol=1e-5)
tvm.testing.assert_allclose(out2.numpy(), np_out2, rtol=1e-5)
tvm.testing.assert_allclose(out3.numpy(), np_out3, rtol=1e-5)
print("test get_valid_counts end")
def test_get_valid_counts_script_func():
device = "llvm"
print(get_valid_counts.script())
mod = tvm.ir.IRModule({"get_valid_counts": get_valid_counts})
print(mod.script())
f = tvm.build(mod["get_valid_counts"], target=device)
_check_get_valid_counts_with_numpy(f, (1, 2500, 6), 0.0, 0, 1)
@T.prim_func
def alloc_zero_dim_buffer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [], dtype="float32")
B = T.match_buffer(b, [], dtype="float32")
C = T.alloc_buffer([], dtype="float32")
A[()] = T.float32(2)
C[()] = A[()] + B[()]
B[()] = C[()]
@T.prim_func
def alloc_zero_dim_buffer_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
B = T.match_buffer(b, (), "float32")
with T.block("root"):
T.reads([])
T.writes([])
C = T.alloc_buffer((), "float32")
A[()] = T.float32(2)
C[()] = A[()] + B[()]
B[()] = C[()]
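# Zero-dimensional (scalar) buffers are indexed with the empty tuple (); the NumPy
# reference below mirrors the prim_func body.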
def _check_alloc_zero_dim_buffer(f):
dtype = "float32"
ctx = tvm.cpu()
    np_data = np.zeros(shape=()).astype(dtype)
np_out = np.zeros(shape=()).astype(dtype)
tvm_data = tvm.nd.array(np_data, ctx)
tvm_out = tvm.nd.array(np_out, ctx)
np_inter = np.array(1)
np_data[()] = 2.0
np_inter[()] = np_data[()] + np_out[()]
np_out[()] = np_inter[()]
f(tvm_data, tvm_out)
tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-5)
def test_alloc_zero_dim_buffer_round_trip():
func = alloc_zero_dim_buffer
func_with_block = alloc_zero_dim_buffer_block
rt_func = tvm.script.from_source(func.script(show_meta=True))
rt_func_with_block = tvm.script.from_source(func_with_block.script(show_meta=True))
rt_mod = tvm.build(rt_func, "llvm")
rt_mod_with_block = tvm.build(rt_func_with_block, "llvm")
tvm.ir.assert_structural_equal(func, func_with_block)
tvm.ir.assert_structural_equal(rt_func, rt_func_with_block)
_check_alloc_zero_dim_buffer(rt_mod)
_check_alloc_zero_dim_buffer(rt_mod_with_block)
@T.prim_func
def ceildiv_test(A: T.Buffer[16, "int32"]):
for i in range(16):
A[i] = T.ceildiv(A[i], 4)
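# T.ceildiv(x, 4) rounds up, i.e. equals (x + 3) // 4 for non-negative x.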
@tvm.testing.requires_llvm
def test_ceildiv():
f = tvm.build(ceildiv_test, "llvm")
a = tvm.nd.array(np.arange(16).astype("int32"))
f(a)
    ref = (np.arange(16) + 3) // 4
tvm.testing.assert_allclose(a.numpy(), ref)
if __name__ == "__main__":
test_get_valid_counts_script_func()
test_alloc_zero_dim_buffer_round_trip() |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.evaluator"""
import pytest
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core.evaluator import ExprEvaluator
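# Helper: wrap the expression string in a Source, take the first expression statement from
# its AST, and evaluate it with ExprEvaluator, resolving names through extra_vars.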
def _calc(expr, extra_vars=None):
if extra_vars is None:
extra_vars = {}
source = Source(expr)
mod_ast = source.as_ast()
mod_body_ast = mod_ast.body
expr_stmt_ast = mod_body_ast[0]
expr_ast = expr_stmt_ast.value
return ExprEvaluator.eval(None, extra_vars, expr_ast)
def test_evaluator_basic():
assert _calc("1, 3.14, True, 'str'") == (1, 3.14, True, "str")
def test_evaluator_op():
assert _calc("1 + 2, 1 - 2, 1 * 2, 1 / 2") == (3, -1, 2, 0.5)
def test_evaluator_value_table():
res = _calc("a + b, a - b, a * b, a / b", {"a": 1, "b": 2})
a, b = 1, 2
assert res == (a + b, a - b, a * b, a / b)
def test_evaluator_func_call():
def func(a, b):
return a + b, a - b, a * b, a / b
assert _calc("func(1, 2)", {"func": func}) == func(1, 2)
def test_evaluator_slice():
res = _calc("a, a[1:], a[:5], a[1: 5], a[1: 5: 2]", {"a": [1, 2, 3, 4, 5, 6]})
a = [1, 2, 3, 4, 5, 6]
assert res == (a, a[1:], a[:5], a[1:5], a[1:5:2])
if __name__ == "__main__":
tvm.testing.main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.ir"""
import pytest
import inspect
import tvm.testing
from tvm.script.parser import ir_module
from tvm.ir import IRModule
def test_ir_base():
@ir_module
class BlankIRModule:
pass
assert isinstance(BlankIRModule, IRModule) and len(BlankIRModule.functions.items()) == 0
if __name__ == "__main__":
tvm.testing.main()
|
"""Unittests for tvm.script.parser.core""" |
import pytest |
import inspect |
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core import doc_core as doc
from tvm.script import tir as T
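# Plain (undecorated) TVMScript-style function used purely as a source fixture for the
# Source/AST tests below.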
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_source_base():
source = Source(matmul)
assert (
source.source_name == inspect.getsourcefile(matmul)
and source.start_line is not None
and source.start_column == 0
and source.source == inspect.getsource(matmul)
and source.full_source == inspect.getsource(inspect.getmodule(matmul))
)
def test_source_ast():
source = Source(matmul)
mod = source.as_ast()
assert isinstance(mod, doc.Module)
func_def = mod.body[0]
assert isinstance(func_def, doc.FunctionDef)
assert func_def.name == "matmul"
func_args = func_def.args
assert (
len(func_args.args) == 3
and func_args.args[0].arg == "a"
and func_args.args[1].arg == "b"
and func_args.args[2].arg == "c"
)
func_body = func_def.body
assert len(func_body) == 4
func_assigns = func_body[:3]
assert (
isinstance(func_assigns[0], doc.Assign)
and func_assigns[0].targets[0].id == "A"
and isinstance(func_assigns[1], doc.Assign)
and func_assigns[1].targets[0].id == "B"
and isinstance(func_assigns[2], doc.Assign)
and func_assigns[2].targets[0].id == "C"
)
func_for = func_body[3]
assert (
len(func_for.target.elts) == 3
and func_for.target.elts[0].id == "i"
and func_for.target.elts[1].id == "j"
and func_for.target.elts[2].id == "k"
)
for_body = func_for.body
assert len(for_body) == 1
for_block = for_body[0]
assert isinstance(for_block, doc.With) and len(for_block.body) == 2
if __name__ == "__main__":
tvm.testing.main() |
"""Unittests for tvm.script.parser.tir""" |
import pytest |
import inspect |