import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
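# Fixture PrimFuncs for the cache_read / cache_write schedule tests below.
# The first group are input workloads; the cache_read_* / cache_write_*
# variants further down are the expected modules after scheduling.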
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_shape_int64(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (T.int64(128), T.int64(128)))
B = T.alloc_buffer((T.int64(128), T.int64(128)))
C = T.match_buffer(c, (T.int64(128), T.int64(128)))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def func_nested_seq(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = 2.0
for i, j in T.grid(8, 8):
for x, y in T.grid(16, 16):
with T.block("B0"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("B1"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A[vi, vj] + B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def access_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
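# opaque_access mixes a plain load/store block with blocks whose bodies are
# intrinsic calls (T.tvm_load_matrix_sync) or T.match_buffer views, so the
# accessed regions must be declared explicitly via T.reads / T.writes.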
@T.prim_func
def opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = A[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle", |
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
@T.prim_func
def func_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A[vi]
@T.prim_func
def func_multi_producer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
for i in range(128):
with T.block("A0"):
vi = T.axis.S(128, i)
A[vi] = 1.0
for i in range(128):
with T.block("A1"):
vi = T.axis.S(128, i)
A[vi] = 2.0
for i in range(128):
with T.block("B"):
vi = T.axis.S(128, i)
B[vi] = A[vi]
@T.prim_func
def func_with_block_predicate() -> None:
A = T.alloc_buffer((120))
B = T.alloc_buffer((120))
for i, j in T.grid(16, 8):
with T.block("producer"):
T.where(i * 8 + j < 120)
ax = T.axis.S(120, i * 8 + j)
A[ax] = 0.0
for i, j in T.grid(16, 8):
with T.block("consumer"):
T.where(i * 8 + j < 120)
ax = T.axis.S(120, i * 8 + j)
B[ax] = A[ax] + 1.0
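# inplace_func and inplace_call update a buffer in place through an extern
# call ("call_impl"); they drive the cache_inplace tests further down.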
@T.prim_func
def inplace_func(data_io: T.Buffer[(64), "int32"]):
data_1d = T.alloc_buffer([64], dtype="int32")
for i0 in T.serial(64):
with T.block("copy_in"):
v0 = T.axis.remap("S", [i0])
data_1d[v0] = data_io[v0]
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_1d[:64])
T.writes(data_1d[:64])
T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
for i0 in T.serial(64):
with T.block("copy_out"):
v0 = T.axis.remap("S", [i0])
data_io[v0] = data_1d[v0]
@T.prim_func
def inplace_call(data_io: T.Buffer[(64), "int32"]):
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_io[:64])
T.writes(data_io[:64])
T.evaluate(T.call_extern("call_impl", data_io.data, dtype=""))
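# Expected modules: each PrimFunc from here on is a target that
# tvm.ir.assert_structural_equal checks a scheduled module against.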
@T.prim_func
def cache_read_nested_seq_target(
B: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]
) -> None:
A = T.alloc_buffer([128, 128], dtype="float32")
A_global = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads()
T.writes(A[vi, vj])
A[vi, vj] = T.float32(2)
for i, j in T.grid(8, 8):
for x, y in T.grid(16, 16):
with T.block("B0"):
vi = T.axis.spatial(128, i * 16 + x)
vj = T.axis.spatial(128, j * 16 + y)
T.reads()
T.writes(B[vi, vj])
B[vi, vj] = T.float32(1)
for x, y in T.grid(16, 16):
with T.block("B1"):
vi = T.axis.spatial(128, i * 16 + x)
vj = T.axis.spatial(128, j * 16 + y)
T.reads(A[vi, vj], B[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] + B[vi, vj]
for ax0, ax1 in T.grid(128, 128):
with T.block("A_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(A[v0, v1])
T.writes(A_global[v0, v1])
A_global[v0, v1] = A[v0, v1]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = A_global[vi, vj] * T.float32(2)
@T.prim_func
def cache_read_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
A_global = T.alloc_buffer((128, 128))
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A_global[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B_local"): |
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_local[vi, vj] + 1.0
@T.prim_func
def cache_read_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
A_global = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
A_local = T.alloc_buffer((128, 128), scope="local")
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("A_local"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_local[vi, vj] = A[vi, vj]
for x, y in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = A_local[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A_global[vi, vj] * 2.0
@T.prim_func
def cache_read_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
A_global = T.alloc_buffer((128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A_global[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = A_global[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A_global.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A_global[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
@T.prim_func
def cache_read_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A_global[vi] = A[vi]
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A_global[vi] + 1.0
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A_global[vi]
@T.prim_func
def cache_read_multi_consumer_target() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = 1.0
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("A"):
vi = T.axis.S(128, i)
A_global[vi] = A[vi]
for i in T.grid(128):
with T.block("C"):
vi = T.axis.S(128, i)
C[vi] = A_global[vi]
@T.prim_func
def continuous_cache_read(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
B_shared = T.alloc_buffer((128, 128), scope="shared")
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B_shared"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = B[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B_local"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = B_shared[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B_local[vi, vj] + 1.0
@T.prim_func
def block_predicate_cache_read() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A[ax] = T.float32(0)
for ax0 in T.serial(120):
with T.block("A_shared"):
v0 = T.axis.spatial(120, ax0)
A_shared[v0] = A[v0]
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B[ax] = A_shared[ax] + T.float32(1)
@T.prim_func
def cache_read_shape_int64(var_A: T.handle, var_C: T.handle) -> None:
A = T.match_buffer(var_A, (T.int64(128), T.int64(128)), dtype="float32")
C = T.match_buffer(var_C, (T.int64(128), T.int64(128)), dtype="float32")
B = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
A_global = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
for ax0, ax1 in T.grid(T.int64(128), T.int64(128)):
with T.block("A_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(A[v0, v1])
T.writes(A_global[v0, v1])
A_global[v0, v1] = A[v0, v1]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A_global[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A_global[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def cache_read_inplace(data_io: T.Buffer[64, "int32"]) -> None:
data_1d = T.alloc_buffer([64], dtype="int32")
data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io[v0])
T.writes(data_io_local[v0])
data_io_local[v0] = data_io[v0]
for i0 in T.serial(64):
with T.block("copy_in"):
v0 = T.axis.spatial(64, i0)
T.reads(data_io_local[v0])
T.writes(data_1d[v0])
data_1d[v0] = data_io_local[v0]
for i0 in T.serial(1):
with T.block("ext_call"):
T.reads(data_1d[0:64])
T.writes(data_1d[0:64])
T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
for i0 in T.serial(64):
with T.block("copy_out"):
v0 = T.axis.spatial(64, i0)
T.reads(data_1d[v0])
T.writes(data_io[v0])
data_io[v0] = data_1d[v0]
@T.prim_func
def cache_inplace_buffer(data_io: T.Buffer[64, "int32"]) -> None:
data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
data_io_global = T.alloc_buffer([64], dtype="int32")
data_io_global_1 = T.alloc_buffer([64], dtype="int32")
for ax0 in T.serial(64):
with T.block("data_io_global"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io[v0])
T.writes(data_io_global[v0])
data_io_global[v0] = data_io[v0]
for i0 in T.serial(1):
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_global[v0])
T.writes(data_io_local[v0])
data_io_local[v0] = data_io_global[v0]
with T.block("ext_call"):
T.reads(data_io_local[0:64])
T.writes(data_io_local[0:64])
T.evaluate(T.call_extern("call_impl", data_io_local.data, dtype=""))
for ax0 in T.serial(64):
with T.block("data_io_local"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_local[v0])
T.writes(data_io_global_1[v0])
data_io_global_1[v0] = data_io_local[v0]
for ax0 in T.serial(64):
with T.block("data_io_global"):
v0 = T.axis.spatial(64, ax0)
T.reads(data_io_global_1[v0])
T.writes(data_io[v0])
data_io[v0] = data_io_global_1[v0]
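# cache_write counterparts: the producer writes into a staging buffer
# (e.g. B_global, C_local) and an added copy block writes back the
# original buffer.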
@T.prim_func
def cache_write_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
B_global = T.alloc_buffer((128, 128), scope="local")
C_local = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B_global"):
vi, vj = T.axis.remap("SS", [i, j])
B_global[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C_local"):
vi, vj = T.axis.remap("SS", [i, j])
C_local[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cache_write_under_scope(b: T.handle, c: T.handle) -> None:
A = T.alloc_buffer((128, 128))
B = T.match_buffer(b, (128, 128))
C = T.match_buffer(c, (128, 128))
A_global = T.alloc_buffer((128, 128))
for i0, j0 in T.grid(8, 8):
with T.block("scope"):
i, j = T.axis.remap("SS", [i0, j0])
A_local = T.alloc_buffer((128, 128), scope="local")
B_global = T.alloc_buffer((128, 128))
for x, y in T.grid(16, 16):
with T.block("A_local"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_local[vi, vj] = 1.0
for x, y in T.grid(16, 16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
A_global[vi, vj] = A_local[vi, vj]
for x, y in T.grid(16, 16):
with T.block("B_global"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B_global[vi, vj] = A_global[vi, vj] + 1.0
for x, y in T.grid(16, 16):
with T.block("B_global"):
vi = T.axis.S(128, i * 16 + x)
vj = T.axis.S(128, j * 16 + y)
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("A_global"):
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = A_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
@T.prim_func
def cache_write_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), dtype="float16")
B = T.match_buffer(b, (128, 128), dtype="float16")
C = T.match_buffer(c, (128, 128), dtype="float16")
D = T.match_buffer(d, (128, 128), dtype="float16")
D_global = T.alloc_buffer((128, 128), dtype="float16")
B_global = T.alloc_buffer((128, 128), dtype="float16")
C_global = T.alloc_buffer((128, 128), dtype="float16")
for i, j in T.grid(128, 128):
with T.block("load_store"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(D_global[vi, vj])
D_global[vi, vj] = A[vi, vj]
for i, j in T.grid(8, 8):
with T.block("opaque"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(B_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.evaluate(
T.tvm_load_matrix_sync(
B_global.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
vi * 2048 + vj * 16,
128,
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(8, 8):
with T.block("match_buffer"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
T.writes(C_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
A0 = T.match_buffer(
A[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
C0 = T.match_buffer(
C_global[
vi * 16 : vi * 16 + 16,
vj * 16 : vj * 16 + 16,
],
(16, 16),
"float16",
strides=[128, 1],
offset_factor=1,
)
T.evaluate(
T.tvm_load_matrix_sync(
C0.data,
16,
16,
16,
vi * 8 + vj,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A0.data,
A0.elem_offset,
A0.strides[0],
1,
dtype="handle",
),
128,
"row_major",
dtype="handle",
)
)
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = D_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_global[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = C_global[vi, vj]
@T.prim_func
def cache_write_multi_consumer() -> None:
A = T.alloc_buffer((128))
B = T.alloc_buffer((128))
C = T.alloc_buffer((128))
A_global = T.alloc_buffer((128))
for i in T.grid(8):
for j in T.grid(16):
with T.block("A_global"):
vi = T.axis.S(128, i * 16 + j)
A_global[vi] = 1.0
for j in T.grid(16):
with T.block("A"):
vi = T.axis.S(128, i * 16 + j)
A[vi] = A_global[vi]
for j in T.grid(16):
with T.block("B"):
vi = T.axis.S(128, i * 16 + j)
B[vi] = A[vi] + 1.0
for i in T.grid(128):
with T.block("C"): |
vi = T.axis.S(128, i)
C[vi] = A[vi]
@T.prim_func
def continuous_cache_write(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
B_shared = T.alloc_buffer((128, 128), scope="shared")
B_local = T.alloc_buffer((128, 128), scope="local")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_local[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_shared[vi, vj] = B_local[vi, vj]
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = B_shared[vi, vj]
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def block_predicate_cache_write_intermediate_buf() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A_shared[ax] = T.float32(0)
for ax0 in T.serial(120):
with T.block("A_shared"):
v0 = T.axis.spatial(120, ax0)
A[v0] = A_shared[v0]
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B[ax] = A[ax] + 1.0
@T.prim_func
def block_predicate_cache_write_output_buf() -> None:
A = T.alloc_buffer([120], dtype="float32")
B = T.alloc_buffer([120], dtype="float32")
B_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
for i, j in T.grid(16, 8):
with T.block("producer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
A[ax] = T.float32(0)
for i, j in T.grid(16, 8):
with T.block("consumer"):
ax = T.axis.spatial(120, i * 8 + j)
T.where(i * 8 + j < 120)
B_shared[ax] = A[ax] + T.float32(1)
for ax0 in T.serial(120):
with T.block("B_shared"):
v0 = T.axis.spatial(120, ax0)
B[v0] = B_shared[v0]
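# Every test below runs twice: once addressing blocks by BlockRV handle
# ("block_obj") and once by block name ("block_name"); both must behave
# identically.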
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_cache_read_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = sch.get_block("C")
if use_block_name:
cached_a = sch.cache_read("B", "A", "global")
cached_b = sch.cache_read("C", "B", "local")
else:
cached_a = sch.cache_read(block_b, 0, "global")
cached_b = sch.cache_read(block_c, 0, "local")
assert sch.get(cached_a) == sch.get(sch.get_block("A_global"))
assert sch.get(cached_b) == sch.get(sch.get_block("B_local"))
assert sch.get(block_b) == sch.get(sch.get_block("B"))
assert sch.get(block_c) == sch.get(sch.get_block("C"))
tvm.ir.assert_structural_equal(cache_read_elementwise, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_read_under_scope(use_block_name):
sch = tir.Schedule(access_under_scope, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "local")
sch.cache_read(block_c, 0, "global")
tvm.ir.assert_structural_equal(cache_read_under_scope, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=access_under_scope)
def test_cache_read_opaque_access(use_block_name):
sch = tir.Schedule(opaque_access, debug_mask="all")
block = "load_store" if use_block_name else sch.get_block("load_store")
sch.cache_read(block, 0, "global")
tvm.ir.assert_structural_equal(cache_read_opaque_access, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
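# consumer_blocks restricts which consumers read through the new cache stage.
# Omitting it and passing the full consumer list explicitly should yield
# structurally identical modules.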
def test_cache_read_location(use_block_name):
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_read(block_b, 0, "global")
tvm.ir.assert_structural_equal(cache_read_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "global", consumer_blocks=[block_c])
tvm.ir.assert_structural_equal(cache_read_multi_consumer_target, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_b, 0, "global", consumer_blocks=[block_b, block_c])
tvm.ir.assert_structural_equal(cache_read_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
def test_continuous_cache_read(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_c, 0, "shared")
sch.cache_read(block_c, 0, "local")
tvm.ir.assert_structural_equal(continuous_cache_read, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_read_with_block_predicate(use_block_name):
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "consumer" if use_block_name else sch.get_block("consumer")
sch.cache_read(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_read, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
def test_cache_read_non_int32_shape(use_block_name):
sch = tir.Schedule(elementwise_shape_int64, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_read(block_b, 0, "global")
tvm.ir.assert_structural_equal(cache_read_shape_int64, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_shape_int64)
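# Negative cases: cache_read must raise ScheduleError for a buffer with
# multiple producers, an out-of-bound read index, and an unknown storage
# scope.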
def test_cache_read_fail_multi_producer(use_block_name):
sch = tir.Schedule(func_multi_producer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 0, "global")
def test_cache_read_fail_index_out_of_bound(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 1, "global")
def test_cache_read_fail_invalid_storage_scope(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_read(block_b, 0, "test_scope")
def test_inplace_cache_read():
sch = tvm.tir.Schedule(inplace_func, debug_mask="all")
block = sch.get_block("copy_in")
sch.cache_read(block, 0, "local", [block])
tvm.ir.assert_structural_equal(cache_read_inplace, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=inplace_func)
def test_cache_inplace():
debug_mask = tvm.tir.schedule.state.ScheduleDebugMask.VERIFY_SREF_TREE
sch = tvm.tir.Schedule(inplace_call, debug_mask=debug_mask)
block = sch.get_block("ext_call")
blocks = sch.cache_inplace(block, 0, "local")
block = sch.cache_read(blocks[0], 0, "global", [blocks[0]])
block = sch.cache_write(blocks[1], 0, "global")
tvm.ir.assert_structural_equal(cache_inplace_buffer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=inplace_call, debug_mask=debug_mask)
def test_cache_read_nested_seq(use_block_name):
sch = tir.Schedule(func_nested_seq, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.cache_read(block_c, 0, "global", consumer_blocks=[block_c])
tvm.ir.assert_structural_equal(cache_read_nested_seq_target, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_nested_seq)
def test_cache_write_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = sch.get_block("C")
cached_b = sch.cache_write("B" if use_block_name else block_b, 0, "local")
cached_c = sch.cache_write("C" if use_block_name else block_c, 0, "global")
assert sch.get(cached_b) == sch.get(sch.get_block("B_local"))
assert sch.get(cached_c) == sch.get(sch.get_block("C_global"))
assert sch.get(block_b) == sch.get(sch.get_block("B"))
assert sch.get(block_c) == sch.get(sch.get_block("C"))
tvm.ir.assert_structural_equal(cache_write_elementwise, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_write_under_scope(use_block_name):
sch = tir.Schedule(access_under_scope, debug_mask="all")
block_a = "A" if use_block_name else sch.get_block("A")
block_b = "B" if use_block_name else sch.get_block("B")
block_scope = sch.get_block("scope")
sch.cache_write(block_a, 0, "local")
sch.cache_write(block_b, 0, "global")
sch.cache_write(block_scope, 0, "global")
tvm.ir.assert_structural_equal(cache_write_under_scope, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=access_under_scope)
def test_cache_write_opaque_access(use_block_name):
sch = tir.Schedule(opaque_access, debug_mask="all")
block_store = "load_store" if use_block_name else sch.get_block("load_store")
block_opaque = "opaque" if use_block_name else sch.get_block("opaque")
block_match_buffer = "match_buffer" if use_block_name else sch.get_block("match_buffer")
sch.cache_write(block_store, 0, "global")
sch.cache_write(block_opaque, 0, "global")
sch.cache_write(block_match_buffer, 0, "global")
tvm.ir.assert_structural_equal(cache_write_opaque_access, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_cache_write_location(use_block_name):
sch = tir.Schedule(func_multi_consumer, debug_mask="all")
block_a = "A" if use_block_name else sch.get_block("A")
sch.cache_write(block_a, 0, "global")
tvm.ir.assert_structural_equal(cache_write_multi_consumer, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_multi_consumer)
def test_continuous_cache_write(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.cache_write(block_b, 0, "shared")
sch.cache_write(block_b, 0, "local")
tvm.ir.assert_structural_equal(continuous_cache_write, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_cache_write_with_block_predicate(use_block_name):
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "producer" if use_block_name else sch.get_block("producer")
sch.cache_write(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_write_intermediate_buf, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
sch = tir.Schedule(func_with_block_predicate, debug_mask="all")
block = "consumer" if use_block_name else sch.get_block("consumer")
sch.cache_write(block, 0, "shared")
tvm.ir.assert_structural_equal(block_predicate_cache_write_output_buf, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func_with_block_predicate)
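# Negative cases for cache_write, mirroring the cache_read failures above.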
def test_cache_write_fail_multi_producer(use_block_name):
sch = tir.Schedule(func_multi_producer, debug_mask="all")
block_a0 = "A0" if use_block_name else sch.get_block("A0")
block_a1 = "A1" if use_block_name else sch.get_block("A1")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_a0, 0, "global")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_a1, 0, "global")
def test_cache_write_fail_index_out_of_bound(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_b, 1, "global")
def test_cache_write_fail_invalid_storage_scope(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.cache_write(block_b, 0, "test_scope")
if __name__ == "__main__":
tvm.testing.main()
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
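# A second test module begins here: fixture PrimFuncs for the compute_at /
# reverse_compute_at tests. Each input module is paired with an
# *_after_compute_at (or *_after_reverse_compute_at) expected module.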
@T.prim_func
def two_elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def two_elementwise_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in range(0, 128):
for ax0, ax1 in T.grid(1, 128):
with T.block("B"):
vi = T.axis.S(128, i + ax0)
vj = T.axis.S(128, ax1)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
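# The blockized fixtures wrap one stage in an outer block (e.g. C_outer)
# with explicit T.reads / T.writes regions; moving the other stage with
# compute_at / reverse_compute_at must preserve those regions.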
@T.prim_func
def blockized_1(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(8, 8):
with T.block("C_outer"):
vi_o, vj_o = T.axis.remap("SS", [i, j])
T.reads([B[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16,
]])
T.writes([C[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("C_inner"):
vi = T.axis.S(128, vi_o * 16 + i_i)
vj = T.axis.S(128, vj_o * 16 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i0_0, i1_0 in T.grid(8, 8):
for ax0, ax1 in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, i0_0 * 16 + ax0)
vj = T.axis.S(128, i1_0 * 16 + ax1)
B[vi, vj] = A[vi, vj] * 2.0
with T.block("C_outer"):
vi_o, vj_o = T.axis.remap("SS", [i0_0, i1_0])
T.reads([B[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16,
]])
T.writes([C[
vi_o * 16 : vi_o * 16 + 16,
vj_o * 16 : vj_o * 16 + 16
]])
for i0_1, i1_1 in T.grid(16, 16):
with T.block("C_inner"):
vi = T.axis.S(128, vi_o * 16 + i0_1)
vj = T.axis.S(128, vj_o * 16 + i1_1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(8, 8):
with T.block("B_outer"):
vio, vjo = T.axis.remap("SS", [i_o, j_o])
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B_inner"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for i_o, j_o, i_i, j_i in T.grid(4, 4, 32, 32):
with T.block("C"):
vi = T.axis.S(128, i_o * 32 + i_i)
vj = T.axis.S(128, j_o * 32 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(8, 8):
with T.block("B_outer"):
vio, vjo = T.axis.remap("SS", [i_o, j_o])
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B_inner"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for ax0, ax1 in T.grid(16, 16):
with T.block("C"):
vi = T.axis.S(128, i_o * 16 + ax0)
vj = T.axis.S(128, j_o * 16 + ax1)
T.reads([B[vi, vj]])
T.writes([C[vi, vj]])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def blockized_2_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_o, j_o in T.grid(4, 4):
for ax0, ax1 in T.grid(2, 2):
with T.block("blockized_B"):
vio = T.axis.S(8, i_o * 2 + ax0)
vjo = T.axis.S(8, j_o * 2 + ax1)
T.reads([A[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
T.writes([B[
vio * 16 : vio * 16 + 16,
vjo * 16 : vjo * 16 + 16,
]])
for i_i, j_i in T.grid(16, 16):
with T.block("B"):
vi = T.axis.S(128, vio * 16 + i_i)
vj = T.axis.S(128, vjo * 16 + j_i)
B[vi, vj] = A[vi, vj] * 2.0
for i_i, j_i in T.grid(32, 32):
with T.block("C"):
vi = T.axis.S(128, i_o * 32 + i_i)
vj = T.axis.S(128, j_o * 32 + j_i)
C[vi, vj] = B[vi, vj] + 1.0
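# cuda_matmul_0 through cuda_matmul_5 stage a 2048x2048 matmul for the GPU.
# Each successive variant moves one more stage (the C computation, then
# A_shared_local, B_shared_local, A_shared, and finally B_shared) under the
# blockIdx / vthread / threadIdx loop nest.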
@T.prim_func
def cuda_matmul_0(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for i, j, k in T.grid(2048, 2048, 2048):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0_4 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1_4 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0_4, v1_4] = C_local[v0_4, v1_4]
@T.prim_func
def cuda_matmul_0_after_compute_at(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for i, j, k in T.grid(4, 4, 2048):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cuda_matmul_1(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS |
", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("A_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k_0 in T.serial(0, 256):
for k_1 in T.unroll(0, 8):
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k_0 * 8 + k_1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[vi, vj] = C_local[vi, vj]
@T.prim_func
def cuda_matmul_2(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared_local"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared_local[v0, v1] = B_shared[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k_0 in T.serial(0, 256):
for k_1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k_0 * 8 + k_1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k_0 * 8 + k_1)
with T.init():
C_local[vi, vj] = T.float32(0)
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_3(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("A_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx. |
y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = T.float32(0)
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local |
"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_4(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for i, j in T.grid(2048, 2048):
with T.block("B_shared"):
v0, v1 = T.axis.remap("SS", [i, j])
B_shared[v0, v1] = B[v0, v1]
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for i, j in T.grid(8, 64):
with T.block("A_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, by * 64 + j)
A_shared[v0, v1] = A[v0, v1]
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"): |
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
@T.prim_func
def cuda_matmul_5(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [2048, 2048], "float32")
B = T.match_buffer(b, [2048, 2048], "float32")
C = T.match_buffer(c, [2048, 2048], "float32")
A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
for vy in T.thread_binding(0, 2, thread = "vthread.y"):
for vx in T.thread_binding(0, 2, thread = "vthread.x"):
for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
for k0 in T.serial(0, 256):
for i, j in T.grid(8, 64):
with T.block("A_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, by * 64 + j)
A_shared[v0, v1] = A[v0, v1]
for i, j in T.grid(8, 64):
with T.block("B_shared"):
v0 = T.axis.S(2048, k0 * 8 + i)
v1 = T.axis.S(2048, bx * 64 + j)
B_shared[v0, v1] = B[v0, v1]
for k1 in T.unroll(0, 8):
for i, j in T.grid(1, 4):
with T.block("A_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
A_shared_local[v0, v1] = A_shared[v0, v1]
for i, j in T.grid(1, 4):
with T.block("B_shared_local"):
v0 = T.axis.S(2048, k0 * 8 + k1 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
B_shared_local[v0, v1] = B_shared[v0, v1]
for _, i, j in T.grid(1, 4, 4):
with T.block("C"):
vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
vk = T.axis.R(2048, k0 * 8 + k1)
with T.init():
C_local[vi, vj] = 0.0
C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
for i, j in T.grid(4, 4):
with T.block("C_local"):
v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
C[v0, v1] = C_local[v0, v1]
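# tiled / tiled_after_reverse_compute_at: reverse_compute_at moves consumer C
# under the i_1 loop of the tiled producer, giving separate j_1 loops for B
# and C.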
@T.prim_func
def tiled(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def tiled_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128], "float32")
B = T.alloc_buffer([128, 128], "float32")
C = T.match_buffer(c, [128, 128], "float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(0, 16):
with T.block("B"): |
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[vi, vj] = A[vi, vj] * 2.0
for j_1 in T.serial(0, 16):
with T.block("C"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def tiled_trivial_binding(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [1, 128, 128], "float32")
B = T.alloc_buffer([1, 128, 128], "float32")
C = T.match_buffer(c, [1, 128, 128], "float32")
for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[0, vi, vj] = A[0, vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[0, vi, vj] = B[0, vi, vj] + 1.0
@T.prim_func
def tiled_trivial_binding_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [1, 128, 128], "float32")
B = T.alloc_buffer([1, 128, 128], "float32")
C = T.match_buffer(c, [1, 128, 128], "float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(0, 16):
with T.block("B"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
B[0, vi, vj] = A[0, vi, vj] * 2.0
for j_1 in T.serial(0, 16):
with T.block("C"):
vi = T.axis.S(128, i_0 * 16 + i_1)
vj = T.axis.S(128, j_0 * 16 + j_1)
C[0, vi, vj] = B[0, vi, vj] + 1.0
@T.prim_func
def factorized(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], "float32")
B = T.match_buffer(b, [16], "float32")
B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
for i_i, k in T.grid(4, 16):
with T.block("B_rf"):
vi = T.axis.S(16, i_o * 4 + i_i)
vj, vk = T.axis.remap("SR", [j, k])
with T.init():
B_rf_local[vi, vj] = 0.0
B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
for i, k in T.grid(16, 16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + B_rf_local[vk, vi]
@T.prim_func
def factorized_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], "float32")
B = T.match_buffer(b, [16], "float32")
B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
for i_i, k in T.grid(4, 16):
with T.block("B_rf"):
vi = T.axis.S(16, i_o * 4 + i_i)
vj = T.axis.S(16, j)
vk = T.axis.R(16, k)
with T.init():
B_rf_local[vi, vj] = 0.0
B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
for k in T.serial(0, 4):
with T.block("B"):
vi = T.axis.S(16, j)
vk = T.axis.R(16, i_o * 4 + k)
with T.init():
B[vi] = 0.0
B[vi] = B[vi] + B_rf_local[vk, vi]
@T.prim_func
def not_all_compact_data_flow(a: T.handle, c: T.handle):
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
for i, j in T.grid(128, 64):
with T.block("C_1"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2] = B[vi, vj * 2] + 1.0
with T.block("C_2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
@T.prim_func
def not_all_compact_data_flow_after_compute_at(a: T.handle, c: T.handle):
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i, j in T.grid(128, 64):
for t in range(2):
with T.block("B"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j * 2 + t)
B[vi, vj] = A[vi, vj]
with T.block("C_1"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2] = B[vi, vj * 2] + 1.0
with T.block("C_2"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
@T.prim_func
def fail_subtree_compact_dataflow(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
for i in range(0, 128):
for j in range(0, 64):
with T.block("B_0"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(0, 64):
with T.block("B_1"):
vi = T.axis.S(128, i)
vj = T.axis.S(128, j + 64)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def fail_all_consumers_under_loop(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
D = T.match_buffer(d, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"): |
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def fail_all_producers_under_loop(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
C = T.alloc_buffer((128, 128), "float32")
D = T.match_buffer(d, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + C[vi, vj]
@T.prim_func
def read_out_of_bound(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16], "float32")
B = T.alloc_buffer([16], "float32")
C = T.match_buffer(c, [16], "float32")
for i in T.serial(0, 16):
with T.block("B"):
v = T.axis.S(16, i)
B[v] = A[v]
for j in T.serial(0, 16):
with T.block("C"):
v = T.axis.S(16, j)
T.reads(B[v : v + 2])
C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
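# Expected IR after compute_at: since "C" reads B[v : v + 2], the producer loop is
# widened to 2 iterations and guarded with T.where against the out-of-bound tail.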
@T.prim_func
def read_out_of_bound_after_compute_at(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [16], "float32")
B = T.alloc_buffer([16], "float32")
C = T.match_buffer(c, [16], "float32")
for j in T.serial(0, 16):
for i in T.serial(0, 2):
with T.block("B"):
v = T.axis.S(16, j + i)
T.where(j + i < 16)
B[v] = A[v]
        with T.block("C"):
v = T.axis.S(16, j)
T.reads([B[v : v + 2]])
C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
@T.prim_func
def multi_reduction(A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(), "float32"]):
B = T.alloc_buffer((16, ), dtype="float32")
for i, k in T.grid(16, 16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
with T.init():
B[vi] = 0.0
B[vi] += A[vi, vk]
for k in T.grid(16):
with T.block("C"):
vk = T.axis.remap("R", [k])
with T.init():
C[()] = 0.0
C[()] += B[vk]
@T.prim_func
def multi_reduction_after_compute_at(
A: T.Buffer[(16, 16), "float32"],
    C: T.Buffer[(), "float32"],
):
B = T.alloc_buffer((16, ), dtype="float32")
for k in T.grid(16):
for kk in T.grid(16):
with T.block("B"):
vi, vk = T.axis.remap("SR", [k, kk])
with T.init():
B[vi] = 0.0
B[vi] += A[vi, vk]
with T.block("C"):
vk = T.axis.remap("R", [k])
with T.init():
C[()] = 0.0
C[()] += B[vk]
@T.prim_func
def tiled_pooling_read_cache(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh, ww in T.grid(224, 224):
with T.block("cache"):
h, w = T.axis.remap("SS", [hh, ww])
cache[h, w] = X[h, w]
for hh_0, ww_0, hh_1, ww_1, khh, kww in T.grid(28, 28, 8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(Y[h, w], T.if_then_else(
                T.likely(1 <= h + kh, dtype="bool") and \
T.likely(h + kh < 225, dtype="bool") and \
T.likely(1 <= w + kw, dtype="bool") and \
T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
@T.prim_func
def tiled_pooling_read_cache_after_compute_at(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [224, 224], dtype="float32")
Y = T.match_buffer(b, [224, 224], dtype="float32")
cache = T.alloc_buffer([224, 224], dtype="float32")
for hh_0, ww_0 in T.grid(28, 28):
for ax0, ax1 in T.grid(10, 10):
with T.block("cache"):
h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
T.where(1 <= hh_0 * 8 + ax0 and hh_0 * 8 + ax0 < 225 and 1 <= ww_0 * 8 + ax1 and ww_0 * 8 + ax1 < 225)
cache[h, w] = X[h, w]
for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
with T.block("compute"):
h = T.axis.spatial(224, hh_0 * 8 + hh_1)
w = T.axis.spatial(224, ww_0 * 8 + ww_1)
kh, kw = T.axis.remap("RR", [khh, kww])
with T.init():
Y[h, w] = 0.0
Y[h, w] = T.max(Y[h, w], T.if_then_else(
T.likely(1 <= h + kh, dtype="bool") and \
T.likely(h + kh < 225, dtype="bool") and \
T.likely(1 <= w + kw, dtype="bool") and \
T.likely(w + kw < 225, dtype="bool"),
cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
@T.prim_func
def non_uniform_tiled_conv(x: T.Buffer[(1, 3, 100, 100), "float32"],
w: T.Buffer[(16, 3, 3, 3), "float32"],
y: T.Buffer[(1, 16, 98, 98), "float32"]) -> None:
x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
for ax0, ax1, ax2, ax3 in T.grid(1, 3, 100, 100):
with T.block("cache"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0 |
, ax1, ax2, ax3])
x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
for h_o, w_o, n, c_o, h_i, w_i, c_i, kh, kw in T.grid(7, 7, 1, 16, 15, 15, 3, 3, 3):
with T.block("compute"):
nn = T.axis.spatial(1, 0)
cc = T.axis.spatial(16, c_o)
hh = T.axis.spatial(98, h_o * 15 + h_i)
ww = T.axis.spatial(98, w_o * 15 + w_i)
rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
with T.init():
y[nn, cc, hh, ww] = T.float32(0)
            y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
                x_global[nn, rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
@T.prim_func
def non_uniform_tiled_conv_after_compute_at(x: T.Buffer[(1, 3, 100, 100), "float32"],
w: T.Buffer[(16, 3, 3, 3), "float32"],
y: T.Buffer[(1, 16, 98, 98), "float32"]) -> None:
x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
for h_o, w_o in T.grid(7, 7):
for ax0, ax1, ax2 in T.grid(3, 17, 17):
with T.block("cache"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(3, ax0)
v2 = T.axis.spatial(100, h_o * 15 + ax1)
v3 = T.axis.spatial(100, w_o * 15 + ax2)
T.where(h_o * 15 + ax1 < 100 and w_o * 15 + ax2 < 100)
x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
for n, c_o, h_i, w_i, c_i, kh, kw in T.grid(1, 16, 15, 15, 3, 3, 3):
with T.block("compute"):
nn = T.axis.spatial(1, 0)
cc = T.axis.spatial(16, c_o)
hh = T.axis.spatial(98, h_o * 15 + h_i)
ww = T.axis.spatial(98, w_o * 15 + w_i)
rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
with T.init():
y[nn, cc, hh, ww] = T.float32(0)
                y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
                    x_global[nn, rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
@T.prim_func
def concat_two_elemwise(x: T.Buffer[(16,), "float32"],
y: T.Buffer[(8,), "float32"],
T_concat: T.Buffer[(24,), "float32"]) -> None:
T_add_1 = T.alloc_buffer([16], dtype="float32")
T_add_2 = T.alloc_buffer([8], dtype="float32")
for i in T.serial(16):
with T.block("T_add_1"):
ax = T.axis.spatial(16, i)
T_add_1[ax] = x[ax] + T.float32(1)
for i in T.serial(8):
with T.block("T_add_2"):
ax = T.axis.spatial(8, i)
T_add_2[ax] = y[ax] + T.float32(2)
for i in T.serial(24):
with T.block("T_concat"):
ax = T.axis.spatial(24, i)
T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
@T.prim_func
def concat_two_elemwise_after_compute_at(x: T.Buffer[(16,), "float32"],
y: T.Buffer[(8,), "float32"],
T_concat: T.Buffer[(24,), "float32"]) -> None:
T_add_1 = T.alloc_buffer([16], dtype="float32")
T_add_2 = T.alloc_buffer([8], dtype="float32")
for i in T.serial(24):
with T.block("T_add_1"):
ax = T.axis.spatial(16, i)
T.where(i < 16)
T_add_1[ax] = x[ax] + T.float32(1)
with T.block("T_add_2"):
ax = T.axis.spatial(8, i - 16)
T.where(16 <= i)
T_add_2[ax] = y[ax] + T.float32(2)
with T.block("T_concat"):
ax = T.axis.spatial(24, i)
T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
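# reverse_compute_at on the workload below must recover the 2-D indices of `temp`
# from the flattened 256-element loop via floordiv/floormod.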
@T.prim_func
def floordiv_and_floormod_indices(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [16, 16])
Y = T.match_buffer(b, [256])
temp = T.alloc_buffer([16, 16])
for i, j in T.grid(16, 16):
with T.block("A"):
v_i, v_j = T.axis.remap("SS", [i, j])
temp[v_i, v_j] = X[v_j, v_i] + 1.0
for i in T.serial(0, 256): |
with T.block("B"):
v_i = T.axis.remap("S", [i])
            Y[v_i] = temp[v_i // 16, v_i % 16]
@T.prim_func
def floordiv_and_floormod_indices_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
X = T.match_buffer(a, [16, 16], dtype="float32")
Y = T.match_buffer(b, [256], dtype="float32")
temp = T.alloc_buffer([16, 16], dtype="float32")
for i in T.serial(0, 16):
for j in T.serial(0, 16):
with T.block("A"):
v_i, v_j = T.axis.remap("SS", [i, j])
temp[v_i, v_j] = X[v_j, v_i] + T.float32(1)
for ax0 in T.serial(0, 16):
with T.block("B"):
v_i = T.axis.spatial(256, i * 16 + ax0)
                Y[v_i] = temp[v_i // 16, v_i % 16]
@T.prim_func
def tiled_repeat_op(x: T.Buffer[(4,), "float32"], T_repeat: T.Buffer[(64,), "float32"]) -> None:
T_add = T.alloc_buffer([4], dtype="float32")
for i0 in T.serial(4):
with T.block("T_add"):
ax0 = T.axis.spatial(4, i0)
T_add[ax0] = x[ax0] + 1.0
for i0_0, i0_1 in T.grid(8, 8):
with T.block("T_repeat"):
ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
            T_repeat[ax0] = T_add[ax0 // 16]
@T.prim_func
def tiled_repeat_op_after_compute_at(x: T.Buffer[(4,), "float32"], T_repeat: T.Buffer[(64,), "float32"]) -> None:
T_add = T.alloc_buffer([4], dtype="float32")
for i0_0 in T.serial(8):
with T.block("T_add"):
            ax0 = T.axis.spatial(4, i0_0 // 2)
T_add[ax0] = x[ax0] + T.float32(1)
for i0_1 in T.serial(8):
with T.block("T_repeat"):
ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
                T_repeat[ax0] = T_add[ax0 // 16]
@T.prim_func
def static_bound(A: T.Buffer[(32, 1), "float32"], C: T.Buffer[(32, 1), "float32"]) -> None:
B = T.alloc_buffer((32, 1), "float32")
for i, j in T.grid(32, 1):
with T.block("B"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(32, 32): |
with T.block("C"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
T.where(j < 1)
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def static_bound_after_compute_at(A: T.Buffer[(32, 1), "float32"], C: T.Buffer[(32, 1), "float32"]) -> None:
B = T.alloc_buffer((32, 1), "float32")
for i in range(32):
for ax0, ax1 in T.grid(1, 1):
with T.block("B"):
vi = T.axis.spatial(32, i + ax0)
vj = T.axis.spatial(1, ax1)
B[vi, vj] = A[vi, vj] * 2.0
for j in range(32):
with T.block("C"):
vi = T.axis.spatial(32, i)
vj = T.axis.spatial(1, j)
T.where(j < 1)
C[vi, vj] = B[vi, vj] + 1.0
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
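# The schedule primitives accept either a BlockRV (sch.get_block("B")) or the plain
# block name ("B"); the `use_block_name` parameter runs each test in both modes.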
def test_compute_at_two_elementwise(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops("C" if use_block_name else sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(two_elementwise_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_compute_at_blockized_1(use_block_name):
sch = tir.Schedule(blockized_1, debug_mask="all")
block = sch.get_block("B")
_, loop = sch.get_loops(sch.get_block("C_outer"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=blockized_1)
def test_compute_at_blockized_2(use_block_name):
sch = tir.Schedule(blockized_2, debug_mask="all")
block = sch.get_block("B_outer")
_, loop, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_2_after_compute_at, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=blockized_2)
def test_compute_at_cuda_matmul_0(use_block_name):
sch = tir.Schedule(cuda_matmul_0, debug_mask="all")
block = sch.get_block("C")
_, _, _, _, _, loop, _, _ = sch.get_loops(sch.get_block("C_local"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_0_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_0)
def test_compute_at_cuda_matmul_1(use_block_name):
sch = tir.Schedule(cuda_matmul_1, debug_mask="all")
block = sch.get_block("A_shared_local")
_, _, _, _, _, _, _, loop, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_2, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_1)
def test_compute_at_cuda_matmul_2(use_block_name):
sch = tir.Schedule(cuda_matmul_2, debug_mask="all")
block = sch.get_block("B_shared_local")
_, _, _, _, _, _, _, loop, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_3, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_2)
def test_compute_at_cuda_matmul_3(use_block_name):
sch = tir.Schedule(cuda_matmul_3, debug_mask="all")
block = sch.get_block("A_shared")
_, _, _, _, _, _, loop, _, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_4, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=cuda_matmul_3)
def test_compute_at_cuda_matmul_4(use_block_name):
sch = tir.Schedule(cuda_matmul_4, debug_mask="all")
block = sch.get_block("B_shared")
_, _, _, _, _, _, loop, _, _, _, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(cuda_matmul_5, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=cuda_matmul_4)
def test_compute_at_reduction_block(use_block_name):
sch = tir.Schedule(multi_reduction, debug_mask="all")
block = sch.get_block("B")
(loop,) = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(multi_reduction_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=multi_reduction)
def test_compute_at_tiled_pooling_read_cache(use_block_name):
sch = tir.Schedule(tiled_pooling_read_cache, debug_mask="all")
compute = sch.get_block("compute")
_, w_o, _, _, _, _ = sch.get_loops(compute)
cache = sch.get_block("cache")
sch.compute_at(cache, w_o)
tvm.ir.assert_structural_equal(tiled_pooling_read_cache_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled_pooling_read_cache)
def test_compute_at_non_uniform_tiled_conv(use_block_name):
sch = tir.Schedule(non_uniform_tiled_conv, debug_mask="all")
compute = sch.get_block("compute")
sch.compute_at(sch.get_block("cache"), sch.get_loops(compute)[1])
tvm.ir.assert_structural_equal(non_uniform_tiled_conv_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=non_uniform_tiled_conv)
def test_compute_at_concat(use_block_name):
sch = tir.Schedule(concat_two_elemwise, debug_mask="all")
concat = sch.get_block("T_concat")
add1 = sch.get_block("T_add_1")
add2 = sch.get_block("T_add_2")
axis = sch.get_loops(concat)[0]
sch.compute_at(add1, axis)
sch.compute_at(add2, axis)
tvm.ir.assert_structural_equal(concat_two_elemwise_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=concat_two_elemwise)
def test_compute_at_tiled_repeat_op(use_block_name):
sch = tir.Schedule(tiled_repeat_op, debug_mask="all")
outer_ax, _ = sch.get_loops(sch.get_block("T_repeat"))
sch.compute_at(sch.get_block("T_add"), outer_ax)
tvm.ir.assert_structural_equal(tiled_repeat_op_after_compute_at, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=tiled_repeat_op)
def test_reverse_compute_at_tiled(use_block_name):
sch = tir.Schedule(tiled, debug_mask="all")
block = sch.get_block("C")
_, _, loop, _ = sch.get_loops(sch.get_block("B"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(tiled_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled)
def test_reverse_compute_at_tiled_trivial_binding(use_block_name):
sch = tir.Schedule(tiled_trivial_binding, debug_mask="all")
block = sch.get_block("C")
_, _, loop, _ = sch.get_loops(sch.get_block("B"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(tiled_trivial_binding_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=tiled_trivial_binding)
def test_reverse_compute_at_blockized_2(use_block_name):
sch = tir.Schedule(blockized_2, debug_mask="all")
block = sch.get_block("C")
_, loop = sch.get_loops(sch.get_block("B_outer"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(blockized_2_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=blockized_2)
def test_reverse_compute_at_factorized(use_block_name):
sch = tir.Schedule(factorized, debug_mask="all")
block = sch.get_block("B")
_, loop, _, _ = sch.get_loops(sch.get_block("B_rf"))
sch.reverse_compute_at(block, loop, preserve_unit_loops=False)
tvm.ir.assert_structural_equal(factorized_after_reverse_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=factorized)
def test_reverse_compute_at_floordiv_and_floormod_indices(use_block_name):
sch = tir.Schedule(floordiv_and_floormod_indices, debug_mask="all")
A = sch.get_block("A")
B = sch.get_block("B")
sch.reverse_compute_at(B, sch.get_loops(A)[0])
tvm.ir.assert_structural_equal(
        floordiv_and_floormod_indices_after_reverse_compute_at, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=floordiv_and_floormod_indices)
def test_read_out_of_bound(use_block_name):
sch = tir.Schedule(read_out_of_bound, debug_mask="all")
block = sch.get_block("B")
(loop,) = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop)
tvm.ir.assert_structural_equal(read_out_of_bound_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=read_out_of_bound)
def test_compact_dataflow(use_block_name):
sch = tir.Schedule(not_all_compact_data_flow, debug_mask="all")
block = sch.get_block("B")
_, loop = sch.get_loops(sch.get_block("C_1"))
sch.compute_at(block, loop)
tvm.ir.assert_structural_equal(not_all_compact_data_flow_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=not_all_compact_data_flow)
def test_compute_at_simplify_static_bound(use_block_name):
sch = tir.Schedule(static_bound, debug_mask="all")
block = sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C"))
sch.compute_at(block, loop, preserve_unit_loops=True)
tvm.ir.assert_structural_equal(static_bound_after_compute_at, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=static_bound)
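# 45 bias elements split across 2 channel groups do not tile evenly: compute_at is
# expected to emit ceil(45 / 2) = 23 producer iterations per group, overlapping at
# element 22.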
def test_compute_at_non_perfect_channel_group(use_block_name):
@T.prim_func
def grouped_channel_bias(
X: T.Buffer[(720, 8, 8), "float32"], Y: T.Buffer[(720, 8, 8), "float32"]
):
B = T.alloc_buffer([45], dtype="float32", scope="")
for i in T.grid(45):
with T.block("init"):
vi = T.axis.remap("S", [i])
B[vi] = vi
for c_o, h, w, c_i in T.grid(2, 8, 8, 360):
with T.block("compute"):
hh, ww = T.axis.remap("SS", [h, w])
cc = T.axis.spatial(720, c_o * 360 + c_i)
                Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
@T.prim_func
def grouped_channel_bias_non_perfect_tiled(
X: T.Buffer[(720, 8, 8), "float32"], Y: T.Buffer[(720, 8, 8), "float32"]
):
        B = T.alloc_buffer([45], dtype="float32")
for c_o in range(2):
for ax0 in range(23):
with T.block("init"):
vi = T.axis.spatial(45, c_o * 22 + ax0)
B[vi] = vi
for h, w, c_i in T.grid(8, 8, 360):
with T.block("compute"):
hh, ww = T.axis.remap("SS", [h, w])
cc = T.axis.spatial(720, c_o * 360 + c_i)
                    Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
sch = tir.Schedule(grouped_channel_bias, debug_mask="all")
loop = sch.get_loops(sch.get_block("compute"))[0]
sch.compute_at(sch.get_block("init"), loop)
tvm.ir.assert_structural_equal(sch.mod["main"], grouped_channel_bias_non_perfect_tiled)
def test_fail_subtree_complete_block(use_block_name):
sch = tir.Schedule(fail_subtree_compact_dataflow, debug_mask="all")
block = sch.get_block("B_0")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="complete block"):
sch.compute_at(block, loop)
def test_fail_not_in_same_scope(use_block_name):
sch = tir.Schedule(blockized_1, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C_inner"))
with pytest.raises(tvm.tir.ScheduleError, match="same block scope"):
sch.compute_at(block, loop)
def test_fail_loop_is_ancestor_of_block(use_block_name):
sch = tir.Schedule(two_elementwise, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError, match="ancestor of block"):
sch.compute_at(block, loop)
def test_fail_output_block(use_block_name):
sch = tir.Schedule(tiled, debug_mask="all")
block = "C" if use_block_name else sch.get_block("C")
loop, _, _, _ = sch.get_loops(sch.get_block("B"))
with pytest.raises(tvm.tir.ScheduleError, match="output block"):
sch.compute_at(block, loop)
def test_fail_all_consumers_under_loop(use_block_name):
sch = tir.Schedule(fail_all_consumers_under_loop, debug_mask="all")
block = "B" if use_block_name else sch.get_block("B")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="requires all the consumer"):
sch.compute_at(block, loop)
def test_fail_all_producers_under_loop(use_block_name):
sch = tir.Schedule(fail_all_producers_under_loop, debug_mask="all")
block = "D" if use_block_name else sch.get_block("D")
loop, _ = sch.get_loops(sch.get_block("C"))
with pytest.raises(tvm.tir.ScheduleError, match="requires all the producer"):
sch.reverse_compute_at(block, loop)
def test_compute_at_int64_loop(use_block_name):
    from tvm import te  # `te` is not imported at module scope in this file
    def _create_prim_func():
n = te.var("n", dtype="int64")
m = te.var("m", dtype="int64")
A = te.placeholder((n, m), name="A", dtype="float32")
B = te.placeholder((n, m), name="B", dtype="float32")
C = te.compute((n, m), lambda i, j: A[i, j] + B[i, j], name="C")
D = te.compute((n, m), lambda i, j: C[i, j] + 1.0, name="D")
return te.create_prim_func([A, B, D])
mod = _create_prim_func()
sch = tir.Schedule(mod, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
block_d = "D" if use_block_name else sch.get_block("D")
i, _ = sch.get_loops(block_d)
sch.compute_at(block_c, i)
verify_trace_roundtrip(sch=sch, mod=mod)
def test_compute_at_to_index():
@T.prim_func
def multi_producers_conv(
data: T.Buffer[(1, 3, 224, 224), "int8"],
w: T.Buffer[(16, 3, 7, 7), "int8"],
conv: T.Buffer[(1, 16, 112, 112), "int32"],
) -> None:
pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
for i0, i1, i2, i3 in T.grid(1, 3, 230, 230):
with T.block("pad"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
T.writes(pad[i0_1, i1_1, i2_1, i3_1])
pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
T.int8(0),
dtype="int8",
)
for i0 in T.serial(1):
for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
with T.block("wbuf"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(w[v0, v1, v2, v3])
T.writes(wbuf[v0, v1, v2, v3])
wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
with T.block("conv"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
"SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
)
T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
T.writes(conv[nn, ff, yy, xx])
with T.init():
conv[nn, ff, yy, xx] = 0
conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
) * T.cast(wbuf[ff, rc, ry, rx], "int32")
@T.prim_func
def multi_producers_after_compute_at(
data: T.Buffer[(1, 3, 224, 224), "int8"],
w: T.Buffer[(16, 3, 7, 7), "int8"],
conv: T.Buffer[(1, 16, 112, 112), "int32"],
) -> None:
pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
for i0 in T.serial(1):
for ax0, ax1, ax2 in T.grid(3, 229, 229):
with T.block("pad"):
i0_1 = T.axis.spatial(1, 0)
i1_1 = T.axis.spatial(3, ax0)
i2_1 = T.axis.spatial(230, ax1)
                    i3_1 = T.axis.spatial(230, ax2)
T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
T.writes(pad[i0_1, i1_1, i2_1, i3_1])
pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
T.int8(0),
dtype="int8",
)
for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
with T.block("wbuf"):
v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
T.reads(w[v0, v1, v2, v3])
T.writes(wbuf[v0, v1, v2, v3])
wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
with T.block("conv"):
nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
"SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
)
T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
T.writes(conv[nn, ff, yy, xx])
with T.init():
conv[nn, ff, yy, xx] = 0
conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
) * T.cast(wbuf[ff, rc, ry, rx], "int32")
sch = tir.Schedule(multi_producers_conv, debug_mask="all")
block_c = sch.get_block("pad")
axis = sch.get_loops("conv")[0]
sch.compute_at(block_c, axis, index=-2)
tvm.ir.assert_structural_equal(multi_producers_after_compute_at, sch.mod["main"])
def test_reverse_compute_at_to_index():
@T.prim_func
def main(A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
C = T.alloc_buffer([128, 128], dtype="float32")
        for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
for ax0 in T.serial(16):
with T.block("C"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def main_reverse_compute_at(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(128, 128), "float32"]
) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
C = T.alloc_buffer([128, 128], dtype="float32")
for i_0, j_0, i_1 in T.grid(8, 8, 16):
for j_1 in T.serial(16):
with T.block("B"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + j_1)
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = A[vi, vj] * T.float32(2)
for ax0 in T.serial(16):
with T.block("D"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(D[vi, vj])
D[vi, vj] = B[vi, vj] + T.float32(1)
for ax0 in T.serial(16):
with T.block("C"):
vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + ax0)
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = B[vi, vj] + T.float32(1)
sch = tir.Schedule(main, debug_mask="all")
block_c = sch.get_block("D")
axis = sch.get_loops("B")[2]
sch.reverse_compute_at(block_c, axis, index=1)
tvm.ir.assert_structural_equal(main_reverse_compute_at, sch.mod["main"])
if __name__ == "__main__":
tvm.testing.main() |
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_multi_producer_consumer(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj]
@T.prim_func
def elementwise_multi_consumer_inlined(a: T.handle, c: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
for i, j in T.grid(128, 128):
with T.block("D"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj]
@T.prim_func
def elementwise_standalone(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", |
[i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
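# "B" in elementwise_standalone is never read, so inlining it is pure dead-code
# elimination; the func below is the expected result (see test_compute_inline_as_dce).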
@T.prim_func
def elementwise_standalone_dce(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def elementwise_under_loop(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
B = T.alloc_buffer((128, 128))
for i in T.serial(0, 128):
for j in T.serial(0, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for j in T.serial(0, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def fail_multi_reader_writer(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.alloc_buffer((128, 128))
D = T.match_buffer(d, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
C[vi, vj] = A[vi, vj] + 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = B[vi, vj] + C[vi, vj]
@T.prim_func
def elementwise_multi_reverse_loads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128)) |
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0
@T.prim_func
def elementwise_multi_reverse_loads_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0
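# reverse_compute_inline on the fixtures below must invert the consumer's affine
# index map, e.g. recover (vi, vj) from the flattened ((vi * 32 + vj) * 8 + vk) * 8 + vl.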
@T.prim_func
def elementwise_reverse_affine_load(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 32, 8, 8), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k, l in T.grid(8, 32, 8, 8):
with T.block("C"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            C[vi, vj, vk, vl] = B[
                ((((vi * 32) + vj) * 8 + vk) * 8 + vl) // 128,
                ((((vi * 32) + vj) * 8 + vk) * 8 + vl) % 128,
            ]
@T.prim_func
def elementwise_reverse_affine_load_inlined(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 32, 8, 8), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            C[
                (vj + vi * 128) // 2048,
                (vj + vi * 128) // 64 % 32,
                ((vj + vi * 128) // 8) % 8,
                (vj + vi * 128) % 8,
            ] = (
                A[vi, vj] * 2.0
            )
@T.prim_func
def elementwise_reverse_affine_load_unit_iter(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
C = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
for i, j, k, l in T.grid(1, 8, 16, 128):
with T.block("C"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
D[vi, vj, vk, vl] = C[vj * 16 + vk, vl] + B[vj, vk, vi]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_inlined(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_simplified(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
C = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
D[0, vi, vj, vk] = C[vi * 16 + vj, vk] + B[vi, vj, 0]
@T.prim_func
def elementwise_reverse_affine_load_unit_iter_simplified_inlined(
A: T.Buffer[(128, 128), "float32"],
B: T.Buffer[(8, 16, 1), "float32"],
D: T.Buffer[(1, 8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0]
@T.prim_func
def elementwise_reverse_affine_chain(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(1, 8, 16, 128), "float32"]
):
B = T.alloc_buffer((128, 128))
C = T.alloc_buffer((8, 16, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
    for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vk] + 1.0
for i, j, k, l in T.grid(1, 8, 16, 128):
with T.block("D"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
D[vi, vj, vk, vl] = C[vj, vk, vl]
@T.prim_func
def elementwise_reverse_affine_chain_inlined(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(1, 8, 16, 128), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_multi_reverse_affine_load(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(8, 16, 128), "float32"],
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vk] + B[vi * 16 + vj, vk]
@T.prim_func
def elementwise_multi_reverse_affine_load_inlined(
A: T.Buffer[(128, 128), "float32"],
C: T.Buffer[(8, 16, 128), "float32"],
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
            C[vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + A[vi, vj] * 2.0
@T.prim_func
def elementwise_reverse_non_affine_load(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(8, 16, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j, k in T.grid(8, 16, 128):
with T.block("C"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
C[vi, vj, vk] = B[vi * 16 + vj, vi * 16 + vj]
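# Opaque accesses through access_ptr cannot be rewritten by inlining, so
# compute_inline must refuse the two fixtures below (see test_opaque_access_load
# and test_opaque_access_store).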
@T.prim_func
def opaque_access_load(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
    B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[0:128, 0:128])
T.writes(C[0:128, 0:128])
T.evaluate(B.access_ptr("r", extent=128))
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def opaque_access_store(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[0:128, 0:128])
T.writes(C[0:128, 0:128])
T.evaluate(B.access_ptr("r", extent=128))
T.evaluate(C.access_ptr("w", extent=128))
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def buffer_matched(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
Bb = T.match_buffer(B[vi : vi + 1, vj], (1, 1))
C[vi, vj] = Bb[0, 0] + 1.0
@T.prim_func
def elementwise_predicate(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(B[i, j] < 10.0)
C[vi, vj] = B[vi, vj] + 1.0
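# Expected result: inlining "B" substitutes the producer expression into the
# consumer's T.where predicate as well as into its body.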
@T.prim_func
def elementwise_predicate_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(A[i, j] * 2.0 < 10.0)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_multi_loads(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2]
@T.prim_func
def elementwise_multi_loads_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (128, 128))
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0
@T.prim_func
def access_opaque_ptr_then_elemwise(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [1024])
B = T.match_buffer(b, [1024])
A_cache = T.alloc_buffer([1024])
BB = T.alloc_buffer([1024])
with T.block("opaque"):
T.reads(A[0:512])
T.writes(A_cache[0:512])
T.evaluate(A.access_ptr("r", extent=512))
T.evaluate(A_cache.access_ptr("w", extent=512))
for i in range(512):
with T.block("BB"):
vi = T.axis.remap("S", [i])
BB[vi] = A_cache[vi] * 2.0
for i in range(512):
with T.block("B"):
vi = T.axis.remap("S", [i]) |
B[vi] = BB[vi] + 1.0
@T.prim_func
def access_opaque_ptr_then_elemwise_inline(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [1024], dtype="float32")
B = T.match_buffer(b, [1024], dtype="float32")
A_cache = T.alloc_buffer([1024], dtype="float32")
with T.block("opaque"):
T.reads(A[0:512])
T.writes([A_cache[0:512]])
T.evaluate(A.access_ptr("r", extent=512))
T.evaluate(A_cache.access_ptr("w", extent=512))
for i in T.serial(0, 512):
with T.block("B"):
vi = T.axis.spatial(512, i)
T.reads([A_cache[vi]])
T.writes([B[vi]])
B[vi] = A_cache[vi] * 2.0 + 1.0
@T.prim_func
def matmul_relu(var_A: T.handle, var_B: T.handle, var_compute: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
compute = T.match_buffer(var_compute, [512, 512], dtype="float32")
C = T.alloc_buffer([512, 512], dtype="float32")
for i0, i1, i2 in T.grid(512, 512, 512):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads([C[i, j], A[i, k], B[k, j]])
T.writes([C[i, j]])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
for i0, i1 in T.grid(512, 512):
with T.block("compute"):
i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
T.reads([C[i0_1, i1_1]])
T.writes([compute[i0_1, i1_1]])
compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
@T.prim_func
def inline_block_with_init(
A: T.Buffer[(1, 512, 7, 7), "float32"],
B: T.Buffer[(1, 512, 1, 1), "float32"],
) -> None:
B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
for i0, i1, i2, i3, i4, i5 in T.grid(1, 512, 1, 1, 49, 1):
with T.block("tensor_rf"):
vi4 = T.axis.spatial(49, i4)
ax0 = T.axis.spatial(1, 0)
ax1 = T.axis.spatial(512, i1) |
ax2 = T.axis.spatial(1, 0)
ax3 = T.axis.spatial(1, 0)
with T.init():
B_rf[ax0, ax1, ax2, ax3, vi4] = T.float32(0)
B_rf[ax0, ax1, ax2, ax3, vi4] = (
B_rf[ax0, ax1, ax2, ax3, vi4]
+ A[
ax0,
ax1,
                    ax2 * 7 + vi4 // 7,
ax3 * 7 + vi4 % 7,
]
)
for i0, i1 in T.grid(1, 512):
for ax0, ax1, ax2, ax3, ax4 in T.grid(49, 1, 1, 1, 1):
with T.block("tensor"):
vi4, ax0_1 = T.axis.remap("RS", [ax0, ax1])
ax1_1 = T.axis.spatial(512, i1 + ax2)
ax2_1, ax3_1 = T.axis.remap("SS", [ax3, ax4])
with T.init():
B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
B[ax0_1, ax1_1, ax2_1, ax3_1] = (
B[ax0_1, ax1_1, ax2_1, ax3_1] + B_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4]
)
@T.prim_func
def exp_exp_opaque_access_with_tvm_access_ptr(
lookup_table: T.Buffer[(1024,), "int8"],
x: T.Buffer[(16,), "float16"],
compute: T.Buffer[(16,), "float16"],
) -> None:
compute_1 = T.alloc_buffer([16], dtype="float16")
for i0 in T.serial(16):
with T.block("compute"):
i0_1 = T.axis.spatial(16, i0)
T.reads(x[i0_1])
T.writes(compute_1[i0_1])
compute_1[i0_1] = T.exp(x[i0_1], dtype="float16")
for i0 in T.serial(16):
with T.block("compute_1"):
i0_2 = T.axis.spatial(16, i0)
T.reads(lookup_table[0:1024], compute_1[i0_2])
T.writes(compute[i0_2])
T.evaluate(lookup_table.access_ptr("r"))
compute[i0_2] = T.exp(
compute_1[i0_2],
dtype="float16",
)
@T.prim_func
def exp_exp_opaque_access_with_tvm_access_ptr_inlined(
lookup_table: T.Buffer[(1024,), "int8"],
x: T.Buffer[(16,), "float16"],
compute: T.Buffer[(16,), "float16"],
) -> None: |
for i0 in T.serial(16):
with T.block("compute_1"):
i0_1 = T.axis.spatial(16, i0)
T.reads(lookup_table[0:1024], x[i0_1])
T.writes(compute[i0_1])
T.evaluate(lookup_table.access_ptr("r"))
compute[i0_1] = T.exp(
T.exp(x[i0_1], dtype="float16"),
dtype="float16",
)
@T.prim_func
def elementwise_overcomputed_producer(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(127, 127), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(127, 127):
with T.block("C"):
cvi, cvj = T.axis.remap("SS", [i, j])
C[cvi, cvj] = B[cvi, cvj] + 1.0
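# Expected result: reverse-inlining "C" into the larger producer domain adds a
# T.where(i < 127 and j < 127) guard so the extra producer points are dropped.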
@T.prim_func
def elementwise_overcomputed_producer_reverse_inlined(
A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(127, 127), "float32"]
) -> None:
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.where(i < 127 and j < 127)
C[vi, vj] = A[vi, vj] * 2.0 + 1.0
@T.prim_func
def elementwise_producer_not_cover_consumer(
A: T.Buffer[(128, 128), "float32"], D: T.Buffer[(256, 128), "float32"]
) -> None:
B = T.alloc_buffer((128, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(256, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
D[vi, vj] = T.if_then_else(vi >= 128, B[vi - 128, vj], T.float32(0), dtype="float32")
@T.prim_func
def elementwise_predicate_producer(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
B = T.alloc_buffer((127, 128))
C = T.match_buffer(c, (127, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
T.where(i < 127)
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(127, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def elementwise_predicate_producer_inlined(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128))
C = T.match_buffer(c, (127, 128))
for i, j in T.grid(128, 128):
with T.block("B"):
T.where(i < 127)
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = A[vi, vj] * T.float32(2) + T.float32(1)
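# A meta-schedule-generated Conv2dInt8 TensorCore module whose epilogue block
# carries a T.where predicate; presumably exercised by later predicate-aware
# inlining tests.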
@tvm.script.ir_module
class Conv2dInt8_TensorCore_with_predicate:
@T.prim_func
def main(p0: T.Buffer[(16, 56, 56, 64), "int8"], p1: T.Buffer[(256, 1, 1, 64), "int8"], p2: T.Buffer[(1, 1, 1, 256), "int32"], p3: T.Buffer[(1, 1, 1, 256), "int32"], p4: T.Buffer[256, "int32"], p5: T.Buffer[256, "int32"], p6: T.Buffer[256, "int32"], p7: T.Buffer[(), "int32"], p8: T.Buffer[1, "int32"], p9: T.Buffer[(16, 56, 56, 256), "int32"], compute: T.Buffer[(16, 56, 56, 256), "int32"]):
T.func_attr({"global_symbol": "main", "tir.noalias": True})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit":1024})
compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared")
conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator")
pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared")
p1_reindex_shared = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="shared")
pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a")
            p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b")
for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"):
for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"):
for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"):
for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2):
for ax0_ax1_fused in T.serial(1024):
with T.block("pad_temp_reindex_shared"):
                                    v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0_ax1_fused // 32)
                                    v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32)
                                    T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                                    T.writes(pad_temp_reindex_shared[v0, v1])
                                    T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]], "meta_schedule.cooperative_fetch":4})
                                    pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]
for ax0_ax1_ax2_ax3_fused in T.serial(2048):
with T.block("p1_reindex_shared"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
                                    v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused // 32)
v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(p1[v2, v0, v1, v3])
T.writes(p1_reindex_shared[v0, v1, v2, v3])
T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]], "meta_schedule.cooperative_fetch":3})
p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3]
for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 2):
for ax0_0_1, ax1_0_1 in T.grid(1, 1): |
with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"):
                                        v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2)
v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_a"})
for ax0_1_1, ax1_1_1 in T.grid(16, 16):
with T.block("pad_temp_reindex_shared_wmma.matrix_a"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1])
T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1):
with T.block("p1_reindex_shared_wmma.matrix_b_o"):
v0 = T.axis.spatial(1, 0)
v1 = T.axis.spatial(1, 0)
v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0)
v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1)
T.reads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16]) |
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_b_trans"})
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("p1_reindex_shared_wmma.matrix_b"):
v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]
for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2):
with T.block("conv2d_nhwc_o"):
v0 = T.axis.reduce(1, 0)
v1 = T.axis.reduce(1, 0)
                                    v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4)
v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4)
v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2)
T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
T.block_attr({"me |
ta_schedule.auto_tensorize":"wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
with T.init():
for ax2_1, ax3_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_init"):
v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1])
T.reads()
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init])
conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0
for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16):
with T.block("conv2d_nhwc"):
v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i])
T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i])
T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
                                            conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.cast(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], "int32") * T.cast(p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i], "int32")
for ax0_0, ax1_0 in T.grid(1, 2):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
                                v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax0_0)
v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0)
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_s32_shared"})
for ax0_1, ax1_1 in T.grid(16, 16):
with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2):
with T.block("conv2d_nhwc_reindex_shared"):
T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64)
                            v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0)
v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + ax1_1 * 64 + ax1_2 * 2 + ax1_3))
                            T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0])
                            T.writes(compute_3[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
compute_3[v0
for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256):
with T.block("compute_4"):
i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12])
T.reads(compute_3[i0_13, i1_13, i2_13, i3_13], p9[i0_13, i1_13, i2_13, i3_13])
T.writes(compute[i0_13, i1_13, i2_13, i3_13])
compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(compute_3[i0_13, i1_13, i2_13, i3_13] + T.q_multiply_shift(p9[i0_13, i1_13, i2_13, i3_13], 2101000910, 31, 0, dtype="int32"), 255), 0)
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
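# As in the compute_at tests above, each test below runs with both block handles
# and raw block names via the `use_block_name` parameter.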
def test_compute_inline_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_compute_inline_under_loop(use_block_name):
sch = tir.Schedule(elementwise_under_loop, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop)
def test_compute_inline_as_dce(use_block_name):
sch = tir.Schedule(elementwise_standalone, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
    verify_trace_roundtrip(sch=sch, mod=elementwise_standalone)
def test_compute_inline_multi_consumer(use_block_name):
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
block_c = sch.get_block("C")
block_d = sch.get_block("D")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
assert sch.get(block_c).name_hint == "C"
assert sch.get(block_d).name_hint == "D"
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_producer_consumer)
def test_compute_inline_fail_multi_writer(use_block_name):
sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_reverse_compute_inline_elementwise(use_block_name):
sch = tir.Schedule(elementwise, debug_mask="all")
block_b = sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reverse_compute_inline_under_loop(use_block_name):
sch = tir.Schedule(elementwise_under_loop, debug_mask="all")
block_b = sch.get_block("B")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
assert sch.get(block_b).name_hint == "B"
verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop)
def test_reverse_compute_inline_fail_as_dce(use_block_name):
sch = tir.Schedule(elementwise_standalone, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_b)
def test_reverse_compute_inline_fail_multi_producer(use_block_name):
sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all")
block_d = "D" if use_block_name else sch.get_block("D")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_d)
def test_reverse_compute_inline_fail_multi_reader(use_block_name):
sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_multi_reverse_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_loads)
def test_reverse_compute_inline_affine_load(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_reverse_affine_load_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load)
def test_reverse_compute_inline_multi_affine_load(use_block_name):
sch = tir.Schedule(elementwise_multi_reverse_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(elementwise_multi_reverse_affine_load_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_affine_load)
def test_reverse_compute_inline_affine_load_unit_iter(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
    tvm.ir.assert_structural_equal(
elementwise_reverse_affine_load_unit_iter_inlined, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter)
def test_reverse_compute_inline_affine_load_unit_iter_simplified(use_block_name):
sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter_simplified, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(block_c)
tvm.ir.assert_structural_equal(
elementwise_reverse_affine_load_unit_iter_simplified_inlined, sch.mod["main"]
)
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter_simplified)
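# For a chain of affine consumers, the two reverse_compute_inline calls should
# commute; the parametrization below checks both inlining orders.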
@pytest.mark.parametrize("reverse_order", [True, False])
def test_reverse_compute_inline_affine_chain(use_block_name, reverse_order):
sch = tir.Schedule(elementwise_reverse_affine_chain, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
block_d = "D" if use_block_name else sch.get_block("D")
if reverse_order:
sch.reverse_compute_inline(block_d)
sch.reverse_compute_inline(block_c)
else:
sch.reverse_compute_inline(block_c)
sch.reverse_compute_inline(block_d)
tvm.ir.assert_structural_equal(elementwise_reverse_affine_chain_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_chain)
def test_reverse_compute_fail_non_affine_load(use_block_name):
sch = tir.Schedule(elementwise_reverse_non_affine_load, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
def test_reverse_compute_fail_multi_reverse_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_loads, debug_mask="all")
block_c = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(block_c)
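# Opaque accesses (reads/writes that cannot be expressed through the block's
# iter bindings) cannot be rewritten by the inliner, so inlining such blocks
# must fail with a ScheduleError.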
def test_opaque_access_load(use_block_name):
    sch = tir.Schedule(opaque_access_load, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_opaque_access_store(use_block_name):
sch = tir.Schedule(opaque_access_store, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
def test_buffer_matched(use_block_name):
sch = tir.Schedule(buffer_matched, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block_b)
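# compute_inline only applies to blocks that write an intermediate buffer; a
# block producing a function output cannot be inlined away.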
def test_output_block(use_block_name):
sch = tir.Schedule(matmul_relu, debug_mask="all")
block = sch.get_block("compute")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block)
def test_compute_inline_predicate(use_block_name):
sch = tir.Schedule(elementwise_predicate, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_predicate)
def test_compute_inline_multi_loads(use_block_name):
sch = tir.Schedule(elementwise_multi_loads, debug_mask="all")
block_b = "B" if use_block_name else sch.get_block("B")
sch.compute_inline(block_b)
tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=elementwise_multi_loads)
def test_compute_inline_with_opaque_access(use_block_name):
"""Test not rewrite opaque reads/writes after irrelavant compute inline"""
sch = tir.Schedule(access_opaque_ptr_then_elemwise, debug_mask="all")
BB = "BB" if use_block_name else sch.get_block("BB")
sch.compute_inline(BB)
tvm.ir.assert_structural_equal(access_opaque_ptr_then_elemwise_inline, sch.mod["main"])
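# A block that carries an init statement (here the rfactor block of a
# reduction) cannot be compute-inlined.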
def test_inline_block_with_init():
    sch = tir.Schedule(inline_block_with_init, debug_mask="all")
block = sch.get_block(name="tensor_rf", func_name="main")
with pytest.raises(tvm.tir.ScheduleError):
sch.compute_inline(block=block)
def test_compute_inline_opaque_access_with_tvm_access_ptr(use_block_name):
"""Test opaque access with tvm_access_ptr after compute inline"""
sch = tir.Schedule(exp_exp_opaque_access_with_tvm_access_ptr, debug_mask="all")
compute = "compute" if use_block_name else sch.get_block("compute")
sch.compute_inline(compute)
tvm.ir.assert_structural_equal(
exp_exp_opaque_access_with_tvm_access_ptr_inlined, sch.mod["main"]
)
def test_reverse_compute_inline_overcomputed_producer(use_block_name):
"""Test reverse compute inline overcomputed producer"""
sch = tir.Schedule(elementwise_overcomputed_producer, debug_mask="all")
compute = "C" if use_block_name else sch.get_block("C")
sch.reverse_compute_inline(compute)
tvm.ir.assert_structural_equal(
elementwise_overcomputed_producer_reverse_inlined, sch.mod["main"]
)
def test_reverse_compute_inline_error_producer_not_cover_consumer(use_block_name):
"""Test reverse compute inline failure when the inlined block iter domains are not covered by
its producer
"""
sch = tir.Schedule(elementwise_producer_not_cover_consumer, debug_mask="all")
compute = "C" if use_block_name else sch.get_block("C")
with pytest.raises(tvm.tir.ScheduleError):
sch.reverse_compute_inline(compute)
def test_reverse_compute_inline_producer_predicate_allowed():
"""Test a case where reverse compute inline is allowed even though the producer has a
non-trivial predicate.
"""
sch = tir.Schedule(elementwise_predicate_producer, debug_mask="all")
sch.reverse_compute_inline(sch.get_block("C"))
tvm.ir.assert_structural_equal(elementwise_predicate_producer_inlined, sch.mod["main"])
def test_reverse_compute_inline_producer_predicate_disallowed():
"""Test reverse compute inline failure when the producer |
has a non-trivial predicate that cannot be
implied by the synthesized predicate of the new inlined block.
"""
sch = tir.Schedule(Conv2dInt8_TensorCore_with_predicate, debug_mask="all")
with pytest.raises(tvm.tir.ScheduleError) as e:
sch.reverse_compute_inline(sch.get_block("compute_4"))
assert (
"that cannot be implied by the synthesized predicate True of the new inlined block"
in str(e)
)
if __name__ == "__main__":
    tvm.testing.main()
import numpy as np
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
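# decompose_padding splits a block that writes a padded buffer into two blocks:
# one filling the padding value, and one computing only the in-bounds region,
# with the padding-filling block hoisted to the given loop position.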
def check_decompose_padding(origin, scheduled, expected, check_run=False):
    # The scheduled function must match the expected one structurally.
    tvm.ir.assert_structural_equal(scheduled, expected)
    if check_run:
        # Optionally build both the original and the scheduled PrimFunc, run
        # them on the same random input, and check that the outputs agree.
        in_buffer = origin.buffer_map[origin.params[0]]
        out_buffer = origin.buffer_map[origin.params[1]]
        in_shape = [int(_) for _ in in_buffer.shape]
        out_shape = [int(_) for _ in out_buffer.shape]
        x = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buffer.dtype))
        y0 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
        y1 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype))
        f_origin = tvm.build(origin)
        f_scheduled = tvm.build(scheduled)
        f_origin(x, y0)
        f_scheduled(x, y1)
        tvm.testing.assert_allclose(y0.numpy(), y1.numpy())
def test_1d_decompose_padding():
@T.prim_func
def before_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in range(140):
with T.block("block"):
vi = T.axis.remap("S", [i])
y[vi] = T.if_then_else(vi >= 6 and vi < 134, x[vi - 6], 0, dtype="int32")
@T.prim_func
def after_decompose(x: T.Buffer[128, "int32"], y: T.Buffer[140, "int32"]):
for i in T.serial(140):
with T.block("block_pad_const"):
vi = T.axis.spatial(140, i)
T.reads()
T.writes(y[vi])
y[vi] = 0
for i in T.serial(128):
with T.block("block"):
vi = T.axis.spatial(128, i)
T.reads(x[vi])
T.writes(y[vi + 6])
y[vi + 6] = x[vi]
sch = tir.Schedule(before_decompose, debug_mask="all")
block = sch.get_block("block")
sch.decompose_padding(block, sch.get_loops(block)[0])
check_decompose_padding(before_decompose, sch.mod["main"], after_decompose, check_run=False)
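# For intuition, the 1-D transformation above matches this NumPy sketch
# (illustrative only, not executed by the test suite):
#
#   x = np.arange(128, dtype="int32")
#   y = np.zeros(140, dtype="int32")   # "block_pad_const": fill the padding value
#   y[6:134] = x                       # "block": write the in-bounds region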
@T.prim_func
def sum_pool_2d(
x: T.Buffer[(1, 16, 225, 225), "int8"], tensor: T.Buffer[(1, 16, 225, 225), "int8"]
):
pad_temp = T.allo |