T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0[0],
ki,
dtype="handle",
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def no_normal_reduction(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def lowered_no_normal_reduction(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B_cross_thread_reduction"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def two_bound_loops(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i in T.serial(0, 128):
for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
with T.block("B"):
vi = T.axis.spatial(128, i)
vk = T.axis.reduce(128, ko * 32 + ki)
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def lowered_two_bound_loops(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
with T.block("B_cross_thread_reduction"):
vi = T.axis.spatial(128, i)
vk = T.axis.reduce(128, ko * 32 + ki)
T.reads([A[vi, vk]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1), A[vi, vk], True, reduce_temp0[0], ko, ki, dtype="handle"
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], dtype="float32")
B = T.match_buffer(b, [16], dtype="float32")
B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
for i in T.thread_binding(0, 16, thread="blockIdx.x"):
for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
for k0i0, k1 in T.grid(4, 16):
with T.block("B_rf"):
vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
vi, vk1 = T.axis.remap("SR", [i, k1])
T.reads([A[vi, vk0, vk1]])
T.writes([B_rf_local[vk0, vi]])
with T.init():
B_rf_local[vk0, vi] = T.float32(0)
B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
for k0i1 in T.serial(0, 4):
with T.block("B"):
vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
vi = T.axis.spatial(16, i)
T.reads([B_rf_local[vk0, vi]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + B_rf_local[vk0, vi]
@T.prim_func
def lowered_multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [16, 16, 16], dtype="float32")
B = T.match_buffer(b, [16], dtype="float32")
B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.thread_binding(0, 16, thread="blockIdx.x"):
for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
with T.block("B_in_thread_init"):
T.reads([])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = T.float32(0)
for k0i0, k1 in T.grid(4, 16):
with T.block("B_rf"):
vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
vi, vk1 = T.axis.remap("SR", [i, k1])
T.reads([A[vi, vk0, vk1]])
T.writes([B_rf_local[vk0, vi]])
with T.init():
B_rf_local[vk0, vi] = T.float32(0)
B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
for k0i1 in T.serial(0, 4):
with T.block("B_normal_reduction"):
vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
vi = T.axis.spatial(16, i)
T.reads([B_rf_local[vk0, vi]])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = normal_reduce_temp0[0] + B_rf_local[vk0, vi]
with T.block("B_cross_thread_reduction"):
T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0[0],
k0o,
dtype="handle",
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(16, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def with_block_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 120], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, ko in T.grid(128, 4):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B"):
vi = T.axis.spatial(128, i)
vk = T.axis.reduce(120, ko * 32 + ki)
T.where(ko * 32 + ki < 120)
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def lowered_with_block_predicate(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 120], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B_in_thread_init"):
T.reads([])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = T.float32(0)
for ko in T.serial(0, 4):
with T.block("B_normal_reduction"):
vi = T.axis.spatial(128, i)
vk = T.axis.reduce(120, ko * 32 + ki)
T.where(ko * 32 + ki < 120)
T.reads([A[vi, vk]])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
with T.block("B_cross_thread_reduction"):
T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0[0],
ki,
dtype="handle",
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def single_reduction_loop_with_block_predicate(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.where(ax1_0 * 512 + ax1_1 < 256)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem_shared[i0_1])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem_shared[i0_1] = T.max(
T_softmax_maxelem_shared[i0_1], A[i0_1, k]
)
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0_2 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.where(ax1_0 * 512 + ax1_1 < 256)
T.reads(A[i0_2, k], T_softmax_maxelem_shared[i0_2])
T.writes(T_softmax_expsum_shared[i0_2])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_3 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.where(i1_0 * 512 + i1_1 < 256)
T.reads(
A[i0_3, i1], T_softmax_maxelem_shared[i0_3], T_softmax_expsum_shared[i0_3]
)
T.writes(T_softmax_norm[i0_3, i1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_3, i1] = (
T.exp(A[i0_3, i1] - T_softmax_maxelem_shared[i0_3], dtype="float32")
/ T_softmax_expsum_shared[i0_3]
)
@T.prim_func
def lowered_single_reduction_loop_with_block_predicate(
A: T.Buffer[(256, 256), "float32"], T_softmax_norm: T.Buffer[(256, 256), "float32"]
) -> None:
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
cross_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
cross_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i0 in T.serial(256):
for ax0 in T.serial(1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_maxelem_in_thread_init"):
T.reads()
T.writes(in_thread_0[0])
in_thread_0[0] = T.float32(-3.4028234663852886e38)
for ax1_0 in T.serial(1):
with T.block("T_softmax_maxelem_in_thread"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_1 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_1, k])
T.writes(in_thread_0[0])
in_thread_0[0] = T.max(in_thread_0[0], A[i0_1, k])
with T.block("T_softmax_maxelem_cross_thread"):
T.reads(in_thread_0[0])
T.writes(cross_thread_0[0])
T.attr(
T.comm_reducer(
lambda x, y: T.max(x, y), [T.float32(-3.4028234663852886e38)]
),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
in_thread_0[0],
True,
cross_thread_0[0],
ax1_1,
dtype="handle",
)
)
with T.block("T_softmax_maxelem_write_back"):
i0_2 = T.axis.spatial(256, i0 + ax0)
T.reads(cross_thread_0[0])
T.writes(T_softmax_maxelem_shared[i0_2])
T_softmax_maxelem_shared[i0_2] = cross_thread_0[0]
for ax0 in T.serial(1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum_in_thread_init"):
T.reads()
T.writes(in_thread_1[0])
in_thread_1[0] = T.float32(0)
for ax1_0 in T.serial(1):
with T.block("T_softmax_expsum_in_thread"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_3 = T.axis.spatial(256, i0 + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_3, k], T_softmax_maxelem_shared[i0_3])
T.writes(in_thread_1[0])
in_thread_1[0] = in_thread_1[0] + T.exp(
A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
)
with T.block("T_softmax_expsum_cross_thread"):
T.reads(in_thread_1[0])
T.writes(cross_thread_1[0])
T.attr(
T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
in_thread_1[0],
True,
cross_thread_1[0],
ax1_1,
dtype="handle",
)
)
with T.block("T_softmax_expsum_write_back"):
i0_4 = T.axis.spatial(256, i0 + ax0)
T.reads(cross_thread_1[0])
T.writes(T_softmax_expsum_shared[i0_4])
T_softmax_expsum_shared[i0_4] = cross_thread_1[0]
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_5 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.where(i1_0 * 512 + i1_1 < 256)
T.reads(
A[i0_5, i1], T_softmax_maxelem_shared[i0_5], T_softmax_expsum_shared[i0_5]
)
T.writes(T_softmax_norm[i0_5, i1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_5, i1] = (
T.exp(A[i0_5, i1] - T_softmax_maxelem_shared[i0_5], dtype="float32")
/ T_softmax_expsum_shared[i0_5]
)
@T.prim_func
def single_reduction_loop_with_tensorize(
input_A: T.Buffer[(1, 64, 7, 7, 32), "uint8"],
input_B: T.Buffer[(16, 64, 1, 1, 8, 32, 4), "int8"],
output: T.Buffer[(1, 16, 7, 7, 32), "int32"],
) -> None:
for i1, i2, i3, i4, i5 in T.grid(16, 4, 98, 2, 32):
with T.block("compute_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1)
oh = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) // 3584)
ow = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 3584 // 512)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer = T.axis.reduce(64, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 512 // 8)
ic_f_inner = T.axis.reduce(8, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 8)
T.reads(
input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
)
T.writes(output[n, oc_chunk, oh, ow, 0:32])
with T.init():
for x in T.serial(32):
with T.block("compute_init"):
oc_block_i_init = T.axis.spatial(32, x)
T.reads()
T.writes(output[n, oc_chunk, oh, ow, oc_block_i_init])
output[n, oc_chunk, oh, ow, oc_block_i_init] = 0
with T.block("compute_o"):
T.reads(
output[n, oc_chunk, oh, ow, 0:32],
input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
)
T.writes(output[n, oc_chunk, oh, ow, 0:32])
A = T.match_buffer(
input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
[4],
dtype="uint8",
offset_factor=1,
)
B = T.match_buffer(
input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
[32, 4],
dtype="int8",
offset_factor=1,
)
C = T.match_buffer(
output[n, oc_chunk, oh, ow, 0:32], [32], dtype="int32", offset_factor=1
)
A_u8x4: T.uint8x4 = A[0:4]
A_i32: T.int32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x128 = B[0, 0:128]
B_i32x32: T.int32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
C[0:32] = T.call_llvm_pure_intrin(
4217, T.uint32(3), C[0:32], T.broadcast(A_i32, 32), B_i32x32, dtype="int32x32"
)
@T.prim_func
def reducer_max(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.min_value("float32")
B[vi] = T.max(B[vi], A[vi, vk])
@T.prim_func
def lowered_reducer_max(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B_cross_thread_reduction"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
)
)
with T.block("B_write_back"):
vi = T.axis.spatial(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
@T.prim_func
def zero_rank_buffer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128], dtype="float32")
B = T.match_buffer(b, [], dtype="float32")
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vk = T.axis.reduce(128, k)
T.reads([A[vk]])
T.writes([B[()]])
with T.init():
B[()] = T.float32(0)
B[()] = B[()] + A[vk]
@T.prim_func
def lowered_zero_rank_buffer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128], dtype="float32")
B = T.match_buffer(b, [], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B_cross_thread_reduction"):
vk = T.axis.reduce(128, k)
T.reads([A[vk]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(T.uint32(1), A[vk], True, reduce_temp0[0], k, dtype="handle")
)
with T.block("B_write_back"):
T.reads([reduce_temp0[0]])
T.writes([B[()]])
B[()] = reduce_temp0[0]
@T.prim_func
def multiple_bufferstore(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
C = T.alloc_buffer([], dtype="float32")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk], B[vi], C[()]])
T.writes([B[vi], C[()]])
with T.init():
B[vi] = T.float32(0)
C[()] = A[vi, vk]
B[vi] = B[vi] + C[()]
@T.prim_func
def reduction_loop_not_deepest(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
for i in T.serial(0, 128):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def reduction_loop_bound_to_blockidx(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="blockIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
for i, j in T.grid(128, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes(
[
B[
T.min(vj, vi) : T.min(vj, vi) + (T.max(vj, vi) + 1 - T.min(vj, vi)),
T.min(vi, vj) : T.min(vi, vj) + (T.max(vi, vj) + 1 - T.min(vi, vj)),
]
]
)
with T.init():
B[vj, vi] = T.float32(0)
B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
@T.prim_func
def invalid_reducer(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i in T.serial(0, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vk = T.axis.remap("SR", [i, k])
T.reads([A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] - A[vi, vk]
@T.prim_func
def softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
A = T.match_buffer(var_A, [256, 256], dtype="float32")
T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
for ax0_0 in T.serial(0, 8):
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
T.reads([A[i0_1, k]])
T.writes([T_softmax_maxelem_shared[i0_1]])
with T.init():
T_softmax_maxelem_shared[i0_1] = T.min_value("float32")
T_softmax_maxelem_shared[i0_1] = T.max(
T_softmax_maxelem_shared[i0_1], A[i0_1, k]
)
for ax0_0 in T.serial(0, 8):
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
i0_2 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
T.reads(
[
A[i0_2, k],
T_softmax_maxelem_shared[i0_2],
]
)
T.writes([T_softmax_expsum_shared[i0_2]])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
)
for i1_0 in T.serial(0, 8):
for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_3 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
T.reads(
[
A[i0_3, i1],
T_softmax_maxelem_shared[i0_3],
T_softmax_expsum_shared[i0_3],
]
)
T.writes([T_softmax_norm[i0_3, i1]])
T.block_attr({"axis": 1})
T_softmax_norm[i0_3, i1] = (
T.exp(
A[i0_3, i1] - T_softmax_maxelem_shared[i0_3],
dtype="float32",
)
/ T_softmax_expsum_shared[i0_3]
)
@T.prim_func
def lowered_softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
A = T.match_buffer(var_A, [256, 256], dtype="float32")
T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_maxelem_normal_reduction_init"):
T.reads([])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = T.min_value("float32")
for ax0_0 in T.serial(0, 8):
with T.block("T_softmax_maxelem_normal_reduction"):
i0_1 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
T.reads([A[i0_1, k]])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = T.max(normal_reduce_temp0[0], A[i0_1, k])
with T.block("T_softmax_maxelem_cross_thread_reduction"):
T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0[0],
ax0_1,
dtype="handle",
)
)
with T.block("T_softmax_maxelem_write_back"):
i0_2 = T.axis.spatial(256, i0)
T.reads([reduce_temp0[0]])
T.writes([T_softmax_maxelem_shared[i0_2]])
T_softmax_maxelem_shared[i0_2] = reduce_temp0[0]
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_expsum_normal_reduction_init"):
T.reads([])
T.writes([normal_reduce_temp1[0]])
normal_reduce_temp1[0] = T.float32(0)
for ax0_0 in T.serial(0, 8):
with T.block("T_softmax_expsum_normal_reduction"):
i0_3 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
T.reads(
[
A[i0_3, k],
T_softmax_maxelem_shared[i0_3],
]
)
T.writes([normal_reduce_temp1[0]])
normal_reduce_temp1[0] = normal_reduce_temp1[0] + T.exp(
A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
)
with T.block("T_softmax_expsum_cross_thread_reduction"):
T.reads([normal_reduce_temp1[0]])
T.writes([reduce_temp1[0]])
T.attr(
T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp1[0],
True,
reduce_temp1[0],
ax0_1,
dtype="handle",
)
)
with T.block("T_softmax_expsum_write_back"):
i0_4 = T.axis.spatial(256, i0)
T.reads([reduce_temp1[0]])
T.writes([T_softmax_expsum_shared[i0_4]])
T_softmax_expsum_shared[i0_4] = reduce_temp1[0]
for i1_0 in T.serial(0, 8):
for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
i0_5 = T.axis.spatial(256, i0)
i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
T.reads(
[
A[i0_5, i1],
T_softmax_maxelem_shared[i0_5],
T_softmax_expsum_shared[i0_5],
]
)
T.writes([T_softmax_norm[i0_5, i1]])
T.block_attr({"axis": 1})
T_softmax_norm[i0_5, i1] = (
T.exp(
A[i0_5, i1] - T_softmax_maxelem_shared[i0_5],
dtype="float32",
)
/ T_softmax_expsum_shared[i0_5]
)
@T.prim_func
def argmax_split(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0 in T.grid(128, 4):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def lowered_argmax_split(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmax_v0: T.Buffer[(128,), "int32"],
argmax_v1: T.Buffer[(128,), "float32"],
) -> None:
cross_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
cross_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
in_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i0 in T.serial(128):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("argmax_in_thread_init"):
T.reads()
T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
in_thread_argmax_v0[0] = -1
in_thread_argmax_v1[0] = T.float32(-3.4028234663852886e38)
for i1_0 in T.serial(4):
with T.block("argmax_in_thread"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
v_argmax_v0: T.int32 = T.Select(
in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v0[0], idx[i, k]
)
v_argmax_v1: T.float32 = T.Select(
in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v1[0], val[i, k]
)
in_thread_argmax_v0[0] = v_argmax_v0
in_thread_argmax_v1[0] = v_argmax_v1
with T.block("argmax_cross_thread"):
T.reads(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
T.writes(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
T.attr(
T.comm_reducer(
lambda x0, x1, y0, y1: (
T.Select(x1 >= y1, x0, y0),
T.Select(x1 >= y1, x1, y1),
),
[-1, T.float32(-3.4028234663852886e38)],
),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(2),
in_thread_argmax_v0[0],
in_thread_argmax_v1[0],
True,
cross_thread_argmax_v0[0],
cross_thread_argmax_v1[0],
i1_1,
dtype="handle",
)
)
with T.block("argmax_write_back"):
i = T.axis.spatial(128, i0)
T.reads(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
T.writes(argmax_v0[i], argmax_v1[i])
argmax_v0[i] = cross_thread_argmax_v0[0]
argmax_v1[i] = cross_thread_argmax_v1[0]
@T.prim_func
def argmin_split_init_update_reordered(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmin_v0: T.Buffer[(128,), "int32"],
argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
for i0, i1_0 in T.grid(128, 4):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("argmin"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(argmin_v0[i], argmin_v1[i])
with T.init():
argmin_v1[i] = T.float32(3.4028234663852886e38)
argmin_v0[i] = -1
v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
v_argmin_v1: T.float32 = T.Select(
argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k]
)
argmin_v1[i] = v_argmin_v1
argmin_v0[i] = v_argmin_v0
@T.prim_func
def lowered_argmin_split_init_update_reordered(
idx: T.Buffer[(128, 128), "int32"],
val: T.Buffer[(128, 128), "float32"],
argmin_v0: T.Buffer[(128,), "int32"],
argmin_v1: T.Buffer[(128,), "float32"],
) -> None:
cross_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
cross_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
in_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i0 in T.serial(128):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("argmin_in_thread_init"):
T.reads()
T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
in_thread_argmin_v0[0] = -1
in_thread_argmin_v1[0] = T.float32(3.4028234663852886e38)
for i1_0 in T.serial(4):
with T.block("argmin_in_thread"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1_0 * 32 + i1_1)
T.reads(idx[i, k], val[i, k])
T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
v_argmin_v0: T.int32 = T.Select(
in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v0[0], idx[i, k]
)
v_argmin_v1: T.float32 = T.Select(
in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v1[0], val[i, k]
)
in_thread_argmin_v1[0] = v_argmin_v1
in_thread_argmin_v0[0] = v_argmin_v0
with T.block("argmin_cross_thread"):
T.reads(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
T.writes(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
T.attr(
T.comm_reducer(
lambda x0, x1, y0, y1: (
T.Select(x1 <= y1, x0, y0),
T.Select(x1 <= y1, x1, y1),
),
[-1, T.float32(3.4028234663852886e38)],
),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(2),
in_thread_argmin_v0[0],
in_thread_argmin_v1[0],
True,
cross_thread_argmin_v0[0],
cross_thread_argmin_v1[0],
i1_1,
dtype="handle",
)
)
with T.block("argmin_write_back"):
i = T.axis.spatial(128, i0)
T.reads(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
T.writes(argmin_v0[i], argmin_v1[i])
argmin_v0[i] = cross_thread_argmin_v0[0]
argmin_v1[i] = cross_thread_argmin_v1[0]
@T.prim_func
def layer_norm_tuple_sum(
data: T.Buffer[(128, 768), "float32"],
gamma: T.Buffer[768, "float32"],
bias: T.Buffer[768, "float32"],
T_layer_norm: T.Buffer[(128, 768), "float32"],
) -> None:
data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
for i1_0 in T.serial(24):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("data_red_temp"):
ax0 = T.axis.spatial(128, i0_fused)
k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
T.reads(data[ax0, k1])
T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
with T.init():
data_red_temp_v0[ax0] = T.float32(0)
data_red_temp_v1[ax0] = T.float32(0)
v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1]
v_data_red_temp_v1: T.float32 = (
data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1]
)
data_red_temp_v0[ax0] = v_data_red_temp_v0
data_red_temp_v1[ax0] = v_data_red_temp_v1
for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
with T.block("T_layer_norm"):
ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
T.reads(
data[ax0, ax1],
data_red_temp_v0[ax0],
data_red_temp_v1[ax0],
gamma[ax1],
bias[ax1],
)
T.writes(T_layer_norm[ax0, ax1])
T_layer_norm[ax0, ax1] = (
data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
) * T.rsqrt(
data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
- data_red_temp_v0[ax0]
* T.float32(0.0013020833333333333)
* (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
+ T.float32(1.0000000000000001e-05),
dtype="float32",
) * gamma[
ax1
] + bias[
ax1
]
@T.prim_func
def lowered_layer_norm_tuple_sum(
data: T.Buffer[(128, 768), "float32"],
gamma: T.Buffer[768, "float32"],
bias: T.Buffer[768, "float32"],
T_layer_norm: T.Buffer[(128, 768), "float32"],
) -> None:
data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
cross_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
cross_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
in_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
with T.block("data_red_temp_in_thread_init"):
T.reads()
T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
in_thread_data_red_temp_v0[0] = T.float32(0)
in_thread_data_red_temp_v1[0] = T.float32(0)
for i1_0 in T.serial(24):
with T.block("data_red_temp_in_thread"):
ax0 = T.axis.spatial(128, i0_fused)
k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
T.reads(data[ax0, k1])
T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
v_data_red_temp_v0: T.float32 = in_thread_data_red_temp_v0[0] + data[ax0, k1]
v_data_red_temp_v1: T.float32 = (
in_thread_data_red_temp_v1[0] + data[ax0, k1] * data[ax0, k1]
)
in_thread_data_red_temp_v0[0] = v_data_red_temp_v0
in_thread_data_red_temp_v1[0] = v_data_red_temp_v1
with T.block("data_red_temp_cross_thread"):
T.reads(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
T.writes(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
T.attr(
T.comm_reducer(
lambda x0, x1, y0, y1: (x0 + y0, x1 + y1), [T.float32(0), T.float32(0)]
),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(2),
in_thread_data_red_temp_v0[0],
in_thread_data_red_temp_v1[0],
True,
cross_thread_data_red_temp_v0[0],
cross_thread_data_red_temp_v1[0],
i1_1,
dtype="handle",
)
)
with T.block("data_red_temp_write_back"):
ax0 = T.axis.spatial(128, i0_fused)
T.reads(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
data_red_temp_v0[ax0] = cross_thread_data_red_temp_v0[0]
data_red_temp_v1[ax0] = cross_thread_data_red_temp_v1[0]
for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
with T.block("T_layer_norm"):
ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
T.reads(
data[ax0, ax1],
data_red_temp_v0[ax0],
data_red_temp_v1[ax0],
gamma[ax1],
bias[ax1],
)
T.writes(T_layer_norm[ax0, ax1])
T_layer_norm[ax0, ax1] = (
data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
) * T.rsqrt(
data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
- data_red_temp_v0[ax0]
* T.float32(0.0013020833333333333)
* (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
+ T.float32(1.0000000000000001e-05),
dtype="float32",
) * gamma[
ax1
] + bias[
ax1
]
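# Assumed behavior of the helpers used below (defined earlier in this file): `_check(original, lowered)`
# applies tir.transform.LowerCrossThreadReduction to `original` and asserts structural equality with
# `lowered`, while `_check_fail(original)` expects the pass to reject `original`.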
def test_loop_split():
_check(loop_split, lowered_loop_split)
def test_no_normal_reduction():
_check(no_normal_reduction, lowered_no_normal_reduction)
def test_two_bound_loops():
_check(two_bound_loops, lowered_two_bound_loops)
def test_multiple_blocks_under_reduction_loop():
_check(multiple_blocks_under_reduction_loop, lowered_multiple_blocks_under_reduction_loop)
def test_with_block_predicate():
_check(with_block_predicate, lowered_with_block_predicate)
def test_single_reduction_loop_with_block_predicate():
_check(
single_reduction_loop_with_block_predicate,
lowered_single_reduction_loop_with_block_predicate,
)
def test_single_reduction_loop_with_tensorize():
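# The reduction loops in this workload are plain T.grid loops, not bound to thread axes, so the
# pass is expected to be a no-op: the input function doubles as the expected output.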
_check(
single_reduction_loop_with_tensorize,
single_reduction_loop_with_tensorize,
)
def test_reducer_max():
_check(reducer_max, lowered_reducer_max)
def test_zero_rank_buffer():
_check(zero_rank_buffer, lowered_zero_rank_buffer)
def test_multiple_bufferstore():
_check_fail(multiple_bufferstore)
def test_reduction_block_not_deepest():
_check_fail(reduction_loop_not_deepest)
def test_reduction_loop_bound_to_blockidx():
_check_fail(reduction_loop_bound_to_blockidx)
def test_different_access_indices():
_check_fail(different_access_indices)
def test_invalid_reducer():
_check_fail(invalid_reducer)
def test_softmax():
_check(softmax, lowered_softmax)
def test_argmax_split():
_check(argmax_split, lowered_argmax_split)
def test_argmin_split_init_update_reordered():
_check(argmin_split_init_update_reordered, lowered_argmin_split_init_update_reordered)
def test_lower_te():
a = te.placeholder((32, 2, 2))
k1 = te.reduce_axis((0, 2), "k1")
k2 = te.reduce_axis((0, 2), "k2")
b = te.compute((32,), lambda i: te.sum(a[i, k1, k2], axis=[k1, k2]))
s = te.create_schedule(b.op)
s[b].bind(k1, te.thread_axis("threadIdx.x"))
s[b].bind(k2, te.thread_axis("threadIdx.y"))
orig_mod = tvm.driver.build_module.schedule_to_module(s, [a, b])
mod = tvm.tir.transform.LowerCrossThreadReduction()(orig_mod)
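# The TE-lowered module contains no TIR blocks, so the pass should leave it unchanged.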
tvm.ir.assert_structural_equal(
mod, orig_mod
)
def test_layer_norm_tuple_sum():
_check(layer_norm_tuple_sum, lowered_layer_norm_tuple_sum)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
from tvm.script import tir as T
@tvm.script.ir_module
class WithInit:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
with T.init():
B[i] = T.float32(0)
B[i] += A[i, j, k]
@tvm.script.ir_module
class WithBranch:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
T.reads(A[i, j, k])
T.writes(B[i])
if (j == 0) and (k == 32):
B[i] = T.float32(0)
B[i] += A[i, j, k]
@tvm.script.ir_module
class InitWithMatchBuffer:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
BB = T.match_buffer(B[i], ())
AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
with T.init():
BB[()] = T.float32(0)
BB[()] += AA[j, k]
@tvm.script.ir_module
class BranchWithMatchBuffer:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [64, 64, 64])
B = T.match_buffer(b, [64])
for i0, j0 in T.grid(64, 64):
for k0 in T.serial(32, 64):
with T.block():
i, j, k = T.axis.remap("SRR", [i0, j0, k0])
T.reads(A[i, j, k])
T.writes(B[i])
BB = T.match_buffer(B[i], ())
AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64))
if (j == 0) and (k == 32):
BB[()] = T.float32(0)
BB[()] += AA[j, k]
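# LowerInitBlock is expected to turn each T.init() into an explicit branch taken on the first
# reduction iteration (j == 0 and k == 32, since k starts at 32), as spelled out in WithBranch
# and BranchWithMatchBuffer above.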
def test_lower_reduction():
origin_mod = WithInit
mod = tvm.tir.transform.LowerInitBlock()(origin_mod)
tvm.ir.assert_structural_equal(mod, WithBranch, True)
def test_lower_match_buffer():
origin_mod = InitWithMatchBuffer
mod = tvm.tir.transform.LowerInitBlock()(origin_mod)
tvm.ir.assert_structural_equal(mod, BranchWithMatchBuffer, True)
def test_lower_te():
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
mod = tvm.tir.transform.LowerInitBlock()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod)
if __name__ == "__main__":
test_lower_reduction()
test_lower_match_buffer()
test_lower_te()
import tvm
import tvm.testing
from tvm import te
import numpy as np
def lower_intrin(params, stmt):
"""Wrapper that runs Simplify and LowerIntrin over a statement or expression."""
lower_expr = isinstance(stmt, tvm.tir.PrimExpr)
stmt = tvm.tir.Evaluate(stmt) if lower_expr else stmt
mod = tvm.IRModule.from_expr(
tvm.tir.PrimFunc(params, stmt).with_attr("target", tvm.target.Target("llvm"))
)
mod = tvm.transform.Sequential([tvm.tir.transform.Simplify(), tvm.tir.transform.LowerIntrin()])(
mod
)
func = mod["main"]
stmt = func.body
return stmt.value if lower_expr else stmt.body
def check_value(expr, vx, vy, data, fref):
n = len(data)
A = te.placeholder((n,), name="A", dtype=expr.dtype)
B = te.placeholder((n,), name="B", dtype=expr.dtype)
def make_binds(i):
x = expr
x = tvm.tir.Let(vx, A[i], x)
x = tvm.tir.Let(vy, B[i], x)
return x
C = te.compute((n,), make_binds)
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], "llvm")
a = tvm.nd.array(np.array([x for x, y in data], dtype=expr.dtype))
b = tvm.nd.array(np.array([y for x, y in data], dtype=expr.dtype))
c = tvm.nd.array(np.zeros(len(data), dtype=expr.dtype))
f(a, b, c)
cref = np.array([fref(x, y) for x, y in data])
np.testing.assert_equal(c.numpy(), cref)
def get_ref_data():
"""Get reference data for every (x, y) pair."""
import itertools
x = range(-10, 10)
y = list(range(-10, 10))
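# drop zero so the floordiv/floormod reference lambdas never divide by zero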
y.remove(0)
return list(itertools.product(x, y))
@tvm.testing.requires_llvm
def test_lower_floordiv():
data = get_ref_data()
for dtype in ["int32", "int64", "int16"]:
x = te.var("x", dtype=dtype)
y = te.var("y", dtype=dtype)
zero = tvm.tir.const(0, dtype)
res = lower_intrin([x, y], tvm.te.floordiv(x, y))
check_value(res, x, y, data, lambda a, b: a // b)
res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floordiv(x, y), zero))
check_value(res, x, y, data, lambda a, b: a // b if b > 0 else 0)
res = lower_intrin(
[x, y], tvm.tir.Select(y >= 0, tvm.te.max(tvm.te.floordiv(x, y), zero), zero)
)
check_value(res, x, y, data, lambda a, b: max(a // b, 0) if b > 0 else 0)
res = lower_intrin(
[x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floordiv(x, y), zero)
)
check_value(res, x, y, data, lambda a, b: a // b if b > 0 and a >= 0 else 0)
res = lower_intrin([x, y], tvm.te.floordiv(x, tvm.tir.const(8, dtype=dtype)))
check_value(res, x, y, [(a, b) for a, b in data if b == 8], lambda a, b: a // b)
@tvm.testing.requires_llvm
def test_lower_floormod():
data = get_ref_data()
for dtype in ["int32", "int64", "int16"]:
x = te.var("x", dtype=dtype)
y = te.var("y", dtype=dtype)
zero = tvm.tir.const(0, dtype)
res = lower_intrin([x, y], tvm.te.floormod(x, y))
check_value(res, x, y, data, lambda a, b: a % b)
res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floormod(x, y), zero))
check_value(res, x, y, data, lambda a, b: a % b if b > 0 else 0)
res = lower_intrin(
[x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floormod(x, y), zero)
)
check_value(res, x, y, data, lambda a, b: a % b if b > 0 and a >= 0 else 0)
res = lower_intrin([x, y], tvm.te.floormod(x, tvm.tir.const(8, dtype=dtype)))
check_value(res, x, y, [(a, b) for a, b in data if b == 8], lambda a, b: a % b)
if __name__ == "__main__":
test_lower_floordiv()
test_lower_floormod()
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def _check(original, transformed):
func = original
mod = tvm.IRModule.from_expr(func)
mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
mod = tvm.tir.transform.Simplify()(mod)
tvm.ir.assert_structural_equal(mod["main"], transformed, True)
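# Each compacted_* function below is an input containing opaque (annotation-only) blocks; the
# matching transformed_* function is the expected result of LowerOpaqueBlock followed by Simplify.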
@T.prim_func
def compacted_elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in range(0, 16):
with T.block():
T.reads(A[i, 0:16])
T.writes(C[i, 0:16])
B = T.alloc_buffer([1, 16], "float32", scope="global")
for j in range(0, 16):
with T.block():
T.reads(A[i, j])
T.writes(B[0, j])
B[0, j] = A[i, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[0, j])
T.writes(C[i, j])
C[i, j] = B[0, j] * 2.0
@T.prim_func
def transformed_elementwise_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i in T.serial(0, 16):
B_new = T.decl_buffer(shape=[1, 16], dtype="float32")
for j in T.serial(0, 16):
B_new[0, j] = A[i, j] + 1.0
for j in T.serial(0, 16):
C[i, j] = B_new[0, j] * 2.0
@T.prim_func
def compacted_gpu_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in T.thread_binding(0, 4, thread="blockIdx.x"):
for i1 in T.thread_binding(0, 2, thread="threadIdx.x"):
for i2 in T.thread_binding(0, 2, thread="vthread"):
with T.block():
T.reads(A[i0 * 4 + i1 * 2 + i2, 0:16])
T.writes(C[i0 * 4 + i1 * 2 + i2, 0:16])
B = T.alloc_buffer([1, 16], "float32", scope="local")
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 4 + i1 * 2 + i2, j])
T.writes(B[0, j])
B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
for j in range(0, 16):
with T.block():
T.reads(B[0, j])
T.writes(C[i0 * 4 + i1 * 2 + i2, j])
C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
@T.prim_func
def transformed_gpu_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B = T.decl_buffer(shape=[1, 16], dtype="float32", scope="local")
for j in range(0, 16):
B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
for j in range(0, 16):
C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
@T.prim_func
def compacted_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n, m), "float32")
C = T.match_buffer(c, (n, m), "float32")
for i in range(0, n):
with T.block():
T.reads(A[i, m])
T.writes(C[i, m])
B = T.alloc_buffer((m,), "float32", scope="global")
for j in range(0, m):
with T.block():
T.reads(A[i, j])
T.writes(B[j])
B[j] = A[i, j] + 1.0
for j in range(0, m):
with T.block():
T.reads(B[j])
T.writes(C[i, j])
C[i, j] = B[j] * 2.0
@T.prim_func
def transformed_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n, m), "float32")
C = T.match_buffer(c, (n, m), "float32")
for i in range(0, n):
B = T.decl_buffer(shape=[m], dtype="float32")
for j in range(0, m):
B[j] = A[i, j] + 1.0
for j in range(0, m):
C[i, j] = B[j] * 2.0
@T.prim_func
def compacted_predicate_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
C = T.match_buffer(c, (32), "float32")
for i, j in T.grid(5, 7):
with T.block():
T.reads(A[i * 7 + j])
T.writes(C[i * 7 + j])
T.where(i * 7 + j < 32)
C[i * 7 + j] = A[i * 7 + j] + 1.0
@T.prim_func
def transformed_predicate_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
C = T.match_buffer(c, (32), "float32")
for i, j in T.grid(5, 7):
if i * 7 + j < 32:
C[i * 7 + j] = A[i * 7 + j] + 1.0
@T.prim_func
def compacted_unit_loop_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
C = T.match_buffer(c, (32), "float32")
for x, y, z in T.grid(4, 1, 8):
with T.block():
T.reads(A[x * 8 + y * 8 + z])
T.writes(C[x * 8 + y * 8 + z])
C[x * 8 + y * 8 + z] = A[x * 8 + y * 8 + z] + 1.0
@T.prim_func
def transformed_unit_loop_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
C = T.match_buffer(c, (32), "float32")
for x, z in T.grid(4, 8):
C[x * 8 + z] = A[x * 8 + z] + 1.0
@T.prim_func
def compacted_multi_alloc_func(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
D = T.match_buffer(d, (32), "float32")
for i in range(0, 32):
with T.block():
T.reads(A[i])
T.writes(D[i])
B = T.alloc_buffer((32,), scope="global")
C = T.alloc_buffer((32,), scope="global")
B[i] = A[i] + 1.0
C[i] = A[i] + B[i]
D[i] = C[i] * 2.0
@T.prim_func
def transformed_multi_alloc_func(a: T.handle, d: T.handle) -> None:
A = T.match_buffer(a, (32), "float32")
D = T.match_buffer(d, (32), "float32")
for i in range(0, 32):
B = T.decl_buffer(shape=(32,), dtype="float32")
C = T.decl_buffer(shape=(32,), dtype="float32")
B[i] = A[i] + 1.0
C[i] = A[i] + B[i]
D[i] = C[i] * 2.0
@T.prim_func
def compacted_strided_buffer_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i0 in range(0, 4):
with T.block():
T.reads(A[i0 * 4 : i0 * 4 + 4, 0:16])
T.writes(C[i0 * 4 : i0 * 4 + 4, 0:16])
B = T.alloc_buffer([4, 16], "float32", strides=[17, 1], scope="global")
for i1 in range(0, 4):
for j in range(0, 16):
with T.block():
T.reads(A[i0 * 4 + i1, j])
T.writes(B[i1, j])
B[i1, j] = A[i0 * 4 + i1, j] + 1.0
for i1 in range(0, 4):
for j in range(0, 16):
with T.block():
T.reads(B[i1, j])
T.writes(C[i0 * 4 + i1, j])
C[i0 * 4 + i1, j] = B[i1, j] * 2.0
@T.prim_func
def transformed_strided_buffer_func(
A: T.Buffer[(16, 16), "float32"], C: T.Buffer[(16, 16), "float32"]
) -> None:
for i0 in T.serial(4):
B_data = T.allocate([4, 17], "float32", "global")
B = T.decl_buffer(shape=[4, 16], dtype="float32", strides=[17, 1], data=B_data)
for i1, j in T.grid(4, 16):
B[i1, j] = A[i0 * 4 + i1, j] + T.float32(1)
for i1, j in T.grid(4, 16):
C[i0 * 4 + i1, j] = B[i1, j] * T.float32(2)
@T.prim_func
def annotated_loops(a: T.handle) -> None:
A = T.match_buffer(a, (16,), "float32")
for i in range(0, 16, annotations={"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0}):
A[i] = 0.0
@T.prim_func
def boolean_handling_before(a: T.Buffer[10, "bool"], b: T.Buffer[10, "bool"]) -> None:
for i0 in T.serial(10):
with T.block("b"):
T.reads(a[i0])
T.writes(b[i0])
b[i0] = a[i0]
@T.prim_func
def boolean_handling_after(a: T.Buffer[10, "bool"], b: T.Buffer[10, "bool"]) -> None:
for i0 in T.serial(10):
b[i0] = a[i0]
def test_elementwise():
_check(compacted_elementwise_func, transformed_elementwise_func)
def test_gpu_workload():
_check(compacted_gpu_func, transformed_gpu_func)
def test_symbolic_shape():
_check(compacted_symbolic_func, transformed_symbolic_func)
def test_predicate():
_check(compacted_predicate_func, transformed_predicate_func)
def test_unit_loops():
_check(compacted_unit_loop_func, transformed_unit_loop_func)
def test_multi_alloc():
_check(compacted_multi_alloc_func, transformed_multi_alloc_func)
def test_strided_buffer():
_check(compacted_strided_buffer_func, transformed_strided_buffer_func)
def test_lower_te():
x = te.placeholder((1,))
y = te.compute((1,), lambda i: x[i] + 2)
s = te.create_schedule(y.op)
orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y])
mod = tvm.tir.transform.LowerOpaqueBlock()(orig_mod)
tvm.ir.assert_structural_equal(mod, orig_mod)
def test_annotated_loops():
mod = tvm.IRModule.from_expr(annotated_loops)
mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
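# Loop annotations are expected to be lowered into a nest of AttrStmt nodes, one per pragma.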
attr1 = mod["main"].body
attr2 = attr1.body
attr3 = attr2.body
assert attr1.attr_key == "pragma_1" and attr1.value == "str_value"
assert attr2.attr_key == "pragma_2"
tvm.ir.assert_structural_equal(attr2.value, tvm.tir.IntImm("int32", 1))
assert attr3.attr_key == "pragma_3"
tvm.ir.assert_structural_equal(attr3.value, tvm.tir.FloatImm("float32", 0.0))
def test_annotated_block():
@T.prim_func
def annotated_block() -> None:
with T.block():
T.block_attr({"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0})
T.evaluate(0)
mod = tvm.IRModule.from_expr(annotated_block)
mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
attr1 = mod["main"].body
attr2 = attr1.body
attr3 = attr2.body
assert attr1.attr_key == "pragma_1" and attr1.value == "str_value"
assert attr2.attr_key == "pragma_2"
tvm.ir.assert_structural_equal(attr2.value, tvm.tir.IntImm("int32", 1))
assert attr3.attr_key == "pragma_3"
tvm.ir.assert_structural_equal(attr3.value, tvm.tir.FloatImm("float32", 0.0))
def test_preserved_annotations():
@T.prim_func
def before(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]):
for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}):
with T.block("block"):
T.block_attr({"k_3": "oops"})
B[i] = A[i] + 1.0
@T.prim_func
def after(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]):
for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}):
B[i] = A[i] + 1.0
mod = tvm.IRModule.from_expr(before)
mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
tvm.ir.assert_structural_equal(mod["main"], after)
def test_boolean_handling():
_check(boolean_handling_before, boolean_handling_after)
if __name__ == "__main__":
tvm.testing.main()
import tvm
from tvm import te
import numpy as np
from tvm import testing
@tvm.register_func("tvm.test_matmul")
def my_matmul(a, b, c):
c.copyfrom(np.dot(a.numpy(), b.numpy()))
def check_packed_func(target="llvm"):
ib = tvm.tir.ir_builder.create()
m = n = k = 16
a = te.placeholder((m, k), name="a", dtype="float64")
b = te.placeholder((k, n), name="b", dtype="float64")
k = te.reduce_axis((0, k), name="k")
c = te.compute((m, n), lambda i, j: te.sum(a[i, k] * b[k, j], axis=k), name="c")
a_buffer = tvm.tir.decl_buffer(
a.shape, a.dtype, name="a_buffer", offset_factor=1, strides=[te.var("s1"), 1]
)
b_buffer = tvm.tir.decl_buffer(
b.shape, b.dtype, name="b_buffer", offset_factor=1, strides=[te.var("s2"), 1]
)
c_buffer = tvm.tir.decl_buffer(
c.shape, c.dtype, name="c_buffer", offset_factor=1, strides=[te.var("s3"), 1]
)
with ib.for_range(0, 10, "i", kind="parallel"):
ib.emit(tvm.tir.call_packed("tvm.test_matmul", a_buffer, b_buffer, c_buffer))
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([a_buffer, b_buffer, c_buffer], stmt))
target = tvm.target.Target(target)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
mod = tvm.tir.transform.MakePackedAPI()(mod)
mod = tvm.tir.transform.LowerTVMBuiltin()(mod)
prim_func = mod.functions.items()[0][1]
node = prim_func.body
while isinstance(node, (tvm.tir.AssertStmt, tvm.tir.LetStmt, tvm.tir.AttrStmt)):
node = node.body
assert isinstance(node, tvm.tir.stmt.For)
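# LowerTVMBuiltin should emit a chain of LetStmts that stack-allocate scratch space for the
# packed call: argument type codes, argument values, the DLTensor array, and the shape data.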
alloca_tcode = node.body
assert isinstance(alloca_tcode, tvm.tir.LetStmt)
expected_value = tvm.tir.call_intrin(
"handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_tcode", 4
)
expected_var = alloca_tcode.var
expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_tcode.body)
tvm.ir.assert_structural_equal(alloca_tcode, expected_stmt, map_free_vars=True)
alloca_value = alloca_tcode.body
assert isinstance(alloca_value, tvm.tir.LetStmt)
expected_value = tvm.tir.call_intrin(
"handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_value", 4
)
expected_var = alloca_value.var
expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_value.body)
tvm.ir.assert_structural_equal(alloca_value, expected_stmt, map_free_vars=True)
alloca_array = alloca_value.body
assert isinstance(alloca_array, tvm.tir.LetStmt)
expected_value = tvm.tir.call_intrin(
"handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "array", 3
)
expected_var = alloca_array.var
expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_array.body)
tvm.ir.assert_structural_equal(alloca_array, expected_stmt, map_free_vars=True)
alloca_shape = alloca_array.body
assert isinstance(alloca_shape, tvm.tir.LetStmt)
expected_value = tvm.tir.call_intrin(
"handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "shape", 12
)
expected_var = alloca_shape.var
expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_shape.body)
tvm.ir.assert_structural_equal(alloca_shape, expected_stmt, map_free_vars=True)
def test_lower_packed_func():
check_packed_func("llvm")
check_packed_func("stackvm")
@tvm.testing.requires_llvm
def test_call_packed_return_non_i32():
expected_value = np.array([1.2, 1.4], dtype="float32")
def packed_echo(value):
return tvm.tir.call_intrin(
value.dtype, tvm.ir.Op.get("tir.tvm_call_packed"), "testing.echo", value
)
def build_tir():
Ab = tvm.tir.decl_buffer((2,), "float32")
ib = tvm.tir.ir_builder.create()
Aptr = ib.buffer_ptr(Ab)
Aptr[0] = packed_echo(tvm.tir.const(expected_value[0], "float32"))
Aptr_var = ib.let("Aptr_dup", packed_echo(Aptr.asobject().data))
ib.emit(tvm.tir.BufferStore(Aptr, tvm.tir.const(expected_value[1], "float32"), [1]))
stmt = ib.get()
return tvm.IRModule.from_expr(
tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "packed_test")
)
mod = build_tir()
f = tvm.build(mod, None, "llvm")
a = tvm.nd.array(np.zeros(2, dtype="float32"))
f(a)
tvm.testing.assert_allclose(a.numpy(), expected_value)
if __name__ == "__main__":
test_call_packed_return_non_i32()
test_lower_packed_func()
import tvm
from tvm import te
from tvm.contrib.nvcc import have_fp16
import numpy as np
import tvm.testing
import pytest
@tvm.testing.requires_cuda
def test_lower_warp_memory_local_scope():
m = 128
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 3, name="B")
s = te.create_schedule(B.op)
AA = s.cache_read(A, "warp", [B])
xo, xi = s[B].split(B.op.axis[0], 64)
xi0, xi1 = s[B].split(xi, factor=32)
tx = te.thread_axis("threadIdx.x")
s[B].bind(xi1, tx)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[AA].compute_at(s[B], xo)
xo, xi = s[AA].split(s[AA].op.axis[0], 32)
s[AA].bind(xi, tx)
cuda_target = tvm.target.Target("cuda")
assert cuda_target.thread_warp_size == 32
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
mod = tvm.lower(s, [A, B], name="f")
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
fdevice = tvm.tir.transform.SplitHostDevice()(mod)["f_kernel0"]
mod = tvm.IRModule.from_expr(fdevice)
fdevice = tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
allocate = fdevice.body.body
assert allocate.buffer_var.type_annotation.storage_scope == "local"
assert fdevice.body.body.extents[0].value == 2
@tvm.testing.requires_cuda
def test_lower_warp_memory_correct_indices():
n = 32
A = te.placeholder((2, n, n), name="A", dtype="float32")
C = te.compute((2, n, n), lambda x, i, j: A(x, i, (j + 1) % n), name="C")
s = te.create_schedule(C.op)
bk_x = te.thread_axis("blockIdx.x")
th_y = te.thread_axis("threadIdx.y")
th_x = te.thread_axis("threadIdx.x")
B = s.cache_read(A, "warp", [C])
cx, ci, cj = C.op.axis
bx, bi, bj = B.op.axis
s[C].bind(cj, th_x)
s[C].bind(cx, bk_x)
s[B].compute_at(s[C], cx)
s[B].bind(bi, th_y)
s[B].bind(bj, th_x)
bounds = tvm.te.schedule.InferBound(s)
ir = tvm.te.schedule.ScheduleOps(s, bounds)
inner_func = ir.body.body.body
store_A_warp = inner_func.seq[0].body.body
indices = list(store_A_warp.indices) |
idx_names = map(lambda x: x.name, filter(lambda x: type(x) is tvm.tir.expr.Var, indices))
assert "threadIdx.x" in idx_names
assert "threadIdx.y" not in idx_names
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_end_to_end():
def check_cuda(dtype):
if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
m = 128
A = te.placeholder((m,), name="A", dtype=dtype)
        B = te.compute((m,), lambda i: A[i // 32 * 32 + (i + 1) % 32], name="B")
cuda_target = tvm.target.Target("cuda")
assert cuda_target.thread_warp_size == 32
with cuda_target:
s = te.create_schedule(B.op)
AA = s.cache_read(A, "warp", [B])
xo, xi = s[B].split(B.op.axis[0], 64)
xi0, xi1 = s[B].split(xi, factor=32)
tx = te.thread_axis("threadIdx.x")
s[B].bind(xi1, tx)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[AA].compute_at(s[B], xo)
xo, xi = s[AA].split(s[AA].op.axis[0], 32)
s[AA].bind(xi, tx)
dev = tvm.cuda(0)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
func = tvm.build(s, [A, B], "cuda")
A_np = np.array(list(range(m)), dtype=dtype)
B_np = np.array(
list(range(1, 32))
+ [0]
+ list(range(33, 64))
+ [32]
+ list(range(65, 96))
+ [64]
+ list(range(97, 128))
+ [96],
dtype=dtype,
)
A_nd = tvm.nd.array(A_np, dev)
B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev)
func(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3)
check_cuda("float32")
check_cuda("float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_half_a_warp():
def check_cuda(dtype):
if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
n, m = 16, 16
A = te.placeholder(
(
n,
m,
),
name="A",
dtype=dtype,
)
B = te.compute(
(
n,
m,
),
lambda j, i: A[j, (i + 1) % m],
name="B",
)
cuda_target = tvm.target.Target("cuda")
assert cuda_target.thread_warp_size == 2 * m
with cuda_target:
s = te.create_schedule(B.op)
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
AA = s.cache_read(A, "warp", [B])
y, x = B.op.axis
z, y = s[B].split(y, nparts=2)
s[B].bind(x, tx)
s[B].bind(y, ty)
s[B].bind(z, bx)
s[AA].compute_at(s[B], y)
_, x = AA.op.axis
s[AA].bind(x, tx)
dev = tvm.cuda(0)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
func = tvm.build(s, [A, B], "cuda")
A_np = np.array([list(range(i, m + i)) for i in range(n)], dtype=dtype)
B_np = np.array([list(range(1 + i, m + i)) + [i] for i in range(n)], dtype=dtype)
A_nd = tvm.nd.array(A_np, dev)
B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev)
func(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3)
check_cuda("float32")
check_cuda("float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_lower_warp_memory_cuda_2_buffers():
def check_cuda(dtype):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
print("Skip because gpu does not have fp16 support")
return
m = 32
A = te.placeholder((m,), name="A", dtype=dtype)
B = te.placeholder((m,), name="B", dtype=dtype)
C = te.compute((m,), lambda i: A[(i + 1) % m] + B[(i + 1) % m], name="C")
cuda_target = tvm.target.Target("cuda")
assert m <= cuda_target.thread_warp_size
with cuda_target:
s = te.create_schedule(C.op)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
AA = s.cache_read(A, "warp", [C])
BB = s.cache_read(B, "warp", [C])
xo, xi = s[C].split(C.op.axis[0], nparts=1)
s[C].bind(xi, tx)
s[C].bind(xo, bx)
s[AA].compute_at(s[C], xo)
s[BB].compute_at(s[C], xo)
xo, xi = s[AA].split(s[AA].op.axis[0], nparts=1)
s[AA].bind(xo, bx)
s[AA].bind(xi, tx)
xo, xi = s[BB].split(s[BB].op.axis[0], nparts=1)
s[BB].bind(xo, bx)
s[BB].bind(xi, tx)
dev = tvm.cuda(0)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
func = tvm.build(s, [A, B, C], "cuda")
AB_np = np.array(list(range(m)), dtype=dtype)
C_np = np.array(list(range(1, m)) + [0], dtype=dtype) * 2
A_nd = tvm.nd.array(AB_np, dev)
B_nd = tvm.nd.array(AB_np, dev)
C_nd = tvm.nd.array(np.zeros(C_np.shape, dtype=C_np.dtype), dev)
func(A_nd, B_nd, C_nd)
tvm.testing.assert_allclose(C_nd.numpy(), C_np, rtol=1e-3)
check_cuda("float32")
check_cuda("float16")
@tvm.testing.requires_gpu
def test_lower_warp_memory_roundup():
def check(device, m):
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
with tvm.target.Target(device):
s = te.create_schedule(B.op)
            xo, xi = s[B].split(B.op.axis[0], factor=32)
tx = te.thread_axis("threadIdx.x")
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, tx)
AA = s.cache_read(A, "warp", [B])
_, yi = s[AA].split(s[AA].op.axis[0], factor=32)
s[AA].bind(yi, tx)
s[AA].compute_at(s[B], xo)
dev = tvm.device(device, 0)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
func = tvm.build(s, [A, B], device)
A_np = np.random.uniform(size=(m,)).astype(A.dtype)
B_np = np.zeros(shape=(m,)).astype(B.dtype)
A_nd = tvm.nd.array(A_np, dev)
B_nd = tvm.nd.array(B_np, dev)
func(A_nd, B_nd)
B_np = A_np + 1
tvm.testing.assert_allclose(B_nd.numpy(), B_np)
for device in ["cuda", "rocm"]:
if not tvm.testing.device_enabled(device):
print("skip because", device, "is not enabled..")
continue
check(device, m=31)
check(device, m=32)
check(device, m=33)
check(device, m=63)
check(device, m=64)
check(device, m=65)
@tvm.testing.requires_cuda
def test_lower_warp_memory_same_thread():
m = n = 128
A = te.placeholder((m, n), name="A")
k = te.reduce_axis((0, n), name="k")
B = te.compute((m,), lambda i: te.sum(A[i, k], axis=[k]))
s = te.create_schedule(B.op)
BB = s.cache_write(B, "warp")
tx = te.thread_axis("threadIdx.x")
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xi, tx)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[BB].compute_at(s[B], xo)
xo, xi = s[BB].split(s[BB].op.axis[0], factor=32)
s[BB].bind(xi, tx)
cuda_target = tvm.target.Target("cuda")
assert cuda_target.thread_warp_size == 32
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
mod = tvm.lower(s, [A, B], name="f")
        mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
fdevice = tvm.tir.transform.SplitHostDevice()(mod)["f_kernel0"]
mod = tvm.IRModule.from_expr(fdevice)
fdevice = tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
assert "tvm_warp_shuffle" not in fdevice.astext()
@tvm.testing.requires_cuda
def test_lower_warp_memory_divide_by_factor():
ib = tvm.tir.ir_builder.IRBuilder()
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
with ib.new_scope():
ib.scope_attr(bx, "thread_extent", 32)
ib.scope_attr(tx, "thread_extent", 32)
t = ib.allocate("float32", 16, name="t", scope="warp")
n = ib.allocate("float32", 16, name="n", scope="local")
n[0] = t[0]
stmt = ib.get()
func = tvm.tir.PrimFunc([], stmt)
func = func.with_attr("from_legacy_te_schedule", True)
cuda_target = tvm.target.Target("cuda")
with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
mod = tvm.lower(func, name="f")
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod)
with pytest.raises(tvm.error.TVMError, match="Divide by zero") as cm:
tvm.tir.transform.LowerWarpMemory()(mod)["f_kernel0"]
if __name__ == "__main__":
pytest.main([__file__]) |
import tvm
from tvm import te
from tvm.driver.build_module import schedule_to_module
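# Tests for tir.transform.MakePackedAPI. As a summary (comment added for readability):
# the pass rewrites a PrimFunc into the PackedFunc calling convention, so the lowered
# "main" takes the packed argument array, type codes, and argument count plus return and
# resource handles instead of the original typed parameters; test_makeapi checks the
# resulting parameter count and the later tests inspect how each argument is unpacked.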
def test_makeapi():
"""Not yet working, mock design"""
n = te.size_var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((n,), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
s = te.create_schedule(C.op)
mod = schedule_to_module(s, [n, A, B, C])
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.Apply(
lambda f: f.with_attr(
{
"target": tvm.target.Target("llvm"),
"global_symbol": "main",
}
)
)(mod)
f = tvm.tir.transform.MakePackedAPI()(mod)["main"]
assert len(f.params) == 6
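# Helper utilities shared by the tests below: _find_assignment walks the nested statement
# bodies until it reaches the LetStmt binding the requested variable name, and _find_next
# walks the bodies until it reaches the first node of the requested type.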
def _find_assignment(stmt, var_name):
while not isinstance(stmt, tvm.tir.LetStmt):
stmt = stmt.body
if stmt.var.name != var_name:
return _find_assignment(stmt.body, var_name)
return stmt
def _find_next(stmt, type):
while not isinstance(stmt, type):
stmt = stmt.body
return stmt
def test_variable_passed_from_args():
ib = tvm.tir.ir_builder.create()
input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
not_device_context = tvm.tir.Var("not_device_context", dtype="handle")
ib.emit(
tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, not_device_context),
)
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, not_device_context], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
num_args = func.params[2]
assert func.body.condition.a == num_args
assert func.body.condition.b == 2
assignment = _find_assignment(func.body, "arg.input_buffer")
assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 0, 12, dtype=handle)"
    assignment = _find_assignment(func.body, "arg.not_device_context")
assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 1, 12, dtype=handle)"
assignment = _find_assignment(func.body, "input_buffer")
assert (
str(assignment.value) == "@tir.tvm_struct_get(arg.input_buffer: handle, 0, 1, dtype=handle)"
)
unpacked_input_buffer = assignment.var
assignment = _find_assignment(func.body, "not_device_context")
assert str(assignment.value) == "arg.not_device_context: handle"
unpacked_not_device_context = assignment.var
seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
call_extern = call.value
assert call_extern.args[1] == unpacked_input_buffer
assert call_extern.args[2] == unpacked_not_device_context
def test_device_api_context_implicit_resource_handle():
ib = tvm.tir.ir_builder.create()
input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
device_context = tvm.tir.Var("device_api_context", dtype="handle")
ib.emit(
tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, device_context),
)
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, device_context], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
num_args = func.params[2]
device_context_in_resource_handle = func.params[5]
assert func.body.condition.a == num_args
assert func.body.condition.b == 1
assignment = _find_assignment(func.body, "arg.input_buffer")
assert str(assignment.value) == "@tir.tvm_struct_get(args: handle, 0, 12, dtype=handle)"
assignment = _find_assignment(func.body, "input_buffer")
assert (
str(assignment.value) == "@tir.tvm_struct_get(arg.input_buffer: handle, 0, 1, dtype=handle)"
)
    unpacked_input_buffer = assignment.var
seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
call_extern = call.value
assert call_extern.args[1] == unpacked_input_buffer
assert call_extern.args[2] == device_context_in_resource_handle
if __name__ == "__main__":
test_makeapi() |
import pytest
import tvm
import tvm.testing
from tvm import te
import numpy
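# Tests for tir.transform.MakeUnpackedAPI. In contrast to MakePackedAPI, the lowered
# function keeps one plain parameter per buffer or variable (no TVMValue packing); the
# tests below check the resulting parameter names and the device_id/device_type setup
# that is prepended when buffer arguments are present.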
@pytest.fixture
def mod_without_attrs():
ib = tvm.tir.ir_builder.create()
A = tvm.tir.decl_buffer(name="A", shape=[1])
stmt = ib.get()
return tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], stmt))
@pytest.fixture
def mod(mod_without_attrs):
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(
mod_without_attrs
)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
return mod
def test_fails_if_not_global_symbol(mod_without_attrs):
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(
mod_without_attrs
)
with pytest.raises(tvm.TVMError, match="Expect PrimFunc to have the global_symbol attribute"):
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
def test_fails_if_no_target(mod_without_attrs):
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod_without_attrs)
with pytest.raises(tvm.TVMError, match="Require the target attribute"):
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
@tvm.testing.parametrize_targets("c", "llvm", "cuda")
def test_device_setup(mod, target, dev):
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target(target)))(mod)
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
assert len(f.params) == 1
assert f.params[0].name == "A"
assert f.body.node == "default"
assert f.body.attr_key == "device_id"
assert f.body.value == 0
assert f.body.body.node == "default"
assert f.body.body.attr_key == "device_type"
assert f.body.body.value == dev.device_type
def test_no_buffers_no_device_setup():
ib = tvm.tir.ir_builder.create()
A = ib.pointer("float32", name="A")
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
assert len(f.params) == 1
assert f.params[0].name == "A"
def test_argument_mapping(mod):
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
assert len(f.params) == 1
assert f.params[0].name == "A"
def test_argument_mapping_multiple():
ib = tvm.tir.ir_builder.create()
A = tvm.tir.decl_buffer(name="A", shape=[1])
B = tvm.tir.decl_buffer(name="B", shape=[1])
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, B], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
assert len(f.params) == 2
assert f.params[0].name == "A"
assert f.params[1].name == "B"
def test_argument_mapping_multiple_matching():
ib = tvm.tir.ir_builder.create()
A = tvm.tir.decl_buffer(name="A", shape=[1])
B = tvm.tir.decl_buffer(name="B", shape=[1])
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, A], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
assert len(f.params) == 2
assert f.params[0].name == "A"
assert f.params[1].name == "A"
def test_body():
ib = tvm.tir.ir_builder.create()
A = tvm.tir.decl_buffer(name="A", shape=[1])
B = tvm.tir.decl_buffer(name="B", shape=[1])
C = ib.buffer_ptr(A)
stmt = ib.get()
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, B, C], stmt))
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(f.params) == 3
assert f.params[0].name == "A"
assert f.params[1].name == "B"
assert f.params[2].name == "A"
if __name__ == "__main__":
pytest.main([__file__]) |
import tvm
import tvm.testing
from tvm.script import tir as T
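# Tests for tir.transform.ManifestSharedMemoryLocalStage. The pass looks for copy blocks
# annotated with "tir.manifest_shared_memory_local_stage" (see MatmulBefore) and splits
# the global-to-shared copy into two steps staged through a "local" buffer, which is the
# structure encoded by MatmulAfter.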
@tvm.script.ir_module
class MatmulBefore:
@T.prim_func
def main(A: T.Buffer[(1024, 1024), "float32"], B: T.Buffer[(1024, 1024), "float32"], C: T.Buffer[(1024, 1024), "float32"]) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"):
for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"):
for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"):
for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"):
for k_0 in T.serial(32):
with T.block():
T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("A_shared"):
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.block_attr({"tir.manifest_shared_memory_local_stage":1})
                                            A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
                                for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("B_shared"):
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.block_attr({"tir.manifest_shared_memory_local_stage":1})
                                            B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 16):
with T.block("C"):
T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
if k_0 * 32 + k_1 * 16 + k_2 == 0:
C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0)
C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]
@tvm.script.ir_module
class MatmulAfter:
@T.prim_func
def main(A: T.Buffer[(1024, 1024), "float32"], B: T.Buffer[(1024, 1024), "float32"], C: T.Buffer[(1024, 1024), "float32"]) -> None:
        T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"):
for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"):
for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"):
for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"):
for k_0 in T.serial(32):
with T.block():
T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32])
A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared")
A_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local")
B_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local")
for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block():
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3])
                                            A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("A_shared"): |
                                            T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]
for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block():
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3])
                                            B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]
for ax0_ax1_fused_0 in T.serial(64):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("B_shared"):
                                            T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32])
                                            B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]
for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 16):
with T.block("C"):
T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]) |
T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2])
if k_0 * 32 + k_1 * 16 + k_2 == 0:
C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0)
C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]
def _check(before, expected):
after = tvm.tir.transform.ManifestSharedMemoryLocalStage()(before)
tvm.ir.assert_structural_equal(after, expected)
def test_transform_matmul():
_check(MatmulBefore, MatmulAfter)
if __name__ == "__main__":
tvm.testing.main() |
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.topi.math import cast
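# Tests for tir.transform.MergeDynamicSharedMemoryAllocations, which merges all
# "shared.dyn" allocations in a kernel into a single allocation; verify_single_allocation
# below asserts that exactly one such allocation survives and, optionally, its extent.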
def run_passes(sch, args):
mod = schedule_to_module(sch, args)
return tvm.transform.Sequential(
[
tvm.tir.transform.StorageFlatten(64),
tvm.tir.transform.Simplify(),
tvm.tir.transform.VectorizeLoop(),
tvm.tir.transform.StorageRewrite(),
tvm.tir.transform.MergeDynamicSharedMemoryAllocations(),
]
)(mod)
def verify_single_allocation(stmt, alloc_size=None):
num_alloc = [0]
alloc_extents = []
def verify(n):
if (
isinstance(n, tvm.tir.Allocate)
and n.buffer_var.type_annotation.storage_scope == "shared.dyn"
):
num_alloc[0] += 1
alloc_extents.append(n.extents[0])
tvm.tir.stmt_functor.post_order_visit(stmt, verify)
assert num_alloc[0] == 1
if alloc_size:
assert alloc_extents[0] == alloc_size
@tvm.testing.requires_gpu
def test_matmul_dyn_shared():
n = 1024
block = 16
A = te.placeholder((n, n), name="A", dtype="float16")
B = te.placeholder((n, n), name="B", dtype="float16")
def syncthread():
return tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"]))
def test_matmul_ir(A, B, C):
ib = tvm.tir.ir_builder.create()
tx = te.thread_axis("threadIdx.x")
ty = te.thread_axis("threadIdx.y")
bx = te.thread_axis("blockIdx.x")
by = te.thread_axis("blockIdx.y")
ib.scope_attr(tx, "thread_extent", block)
ib.scope_attr(ty, "thread_extent", block)
        ib.scope_attr(bx, "thread_extent", n // block)
        ib.scope_attr(by, "thread_extent", n // block)
A_sh = ib.allocate(A.dtype, (block, block), scope="shared.dyn", name="A_sh")
B_sh = ib.allocate(B.dtype, (block, block), scope="shared.dyn", name="B_sh")
C_local = ib.allocate(C.dtype, (1,), scope="local", name="C_local")
C_sh = ib.allocate(C.dtype, (block, block), scope="shared.dyn", name="C_sh")
A_ptr = ib.buffer_ptr(A) |
B_ptr = ib.buffer_ptr(B)
C_ptr = ib.buffer_ptr(C)
C_local[0] = 0.0
        with ib.for_range(0, n // block, name="i") as i:
A_sh[ty, tx] = A_ptr[by * block + ty, i * block + tx]
B_sh[ty, tx] = B_ptr[i * block + ty, bx * block + tx]
ib.emit(syncthread())
with ib.for_range(0, block, name="k") as k:
C_local[0] += cast(A_sh[ty, k] * B_sh[k, tx], "float32")
ib.emit(syncthread())
C_sh[ty, tx] = C_local[0]
C_ptr[by * block + ty, bx * block + tx] = C_sh[ty, tx]
return ib.get()
C = te.extern(
A.shape,
[A, B],
lambda ins, outs: test_matmul_ir(ins[0], ins[1], outs[0]),
name="matmul",
dtype="float32",
)
s = te.create_schedule(C.op)
mod = run_passes(s, [A, B, C])
expected_alloc_size = block * block * 4
verify_single_allocation(mod["main"].body, expected_alloc_size)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
fmatmul = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
size = (n, n)
a_np = np.random.uniform(size=size).astype(A.dtype)
b_np = np.random.uniform(size=size).astype(B.dtype)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
c = tvm.nd.array(np.zeros(size, dtype=C.dtype), dev)
fmatmul(a, b, c)
np_ref = np.dot(a_np.astype("float32"), b_np.astype("float32"))
tvm.testing.assert_allclose(c.numpy(), np_ref, 1e-4, 1e-4)
for target in ["cuda", "nvptx"]:
check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_vectorized_store():
"""Test vectorized store into dynamic shared memory"""
n = te.size_var("n")
A = te.placeholder((n,), name="A", dtype="float16")
B = te.placeholder((n,), name="B", dtype="float32")
def test_device_ir(A, B, C):
n = A.shape[0]
ib = tvm.tir.ir_builder.create()
values_per_thread = 4
        tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", tvm.tir.indexdiv(n, values_per_thread))
A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")
B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
with ib.for_range(0, values_per_thread, kind="vectorize") as i:
A_sh[tx * values_per_thread + i] = Aptr[tx * values_per_thread + i]
B_sh[tx * values_per_thread + i] = Bptr[tx * values_per_thread + i]
with ib.for_range(0, values_per_thread) as i:
Cptr[tx * values_per_thread + i] = (
cast(A_sh[tx * values_per_thread + i], "float32") + B_sh[tx * values_per_thread + i]
)
return ib.get()
C = te.extern(
(n,),
[A, B],
lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
name="vadd",
dtype="float32",
)
s = te.create_schedule(C.op)
mod = run_passes(s, [A, B, C])
verify_single_allocation(mod["main"].body)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
fadd = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
for n in [512, 1024]:
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4
)
for target in ["cuda", "nvptx"]:
check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_reuse_and_merge():
n = 64
A = te.placeholder((n,), name="A", dtype="float32")
B = te.placeholder((n,), name="B", dtype="float32")
C = te.placeholder((te.size_var("n_dyn"),), name="C", dtype="float32")
    def test_device_ir(A, B, C, D):
ib = tvm.tir.ir_builder.create()
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(tx, "thread_extent", n)
A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn", name="A_sh")
B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn", name="B_sh")
C_sh = ib.allocate(C.dtype, (C.shape[0],), scope="shared.dyn", name="C_sh")
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
Dptr = ib.buffer_ptr(D)
A_sh[tx] = Aptr[tx]
Dptr[tx] = A_sh[tx]
B_sh[tx] = Bptr[tx]
Dptr[tx] += B_sh[tx]
C_sh[tx] = Cptr[tx]
Dptr[tx] += C_sh[tx]
return ib.get()
D = te.extern(
(n,),
[A, B, C],
lambda ins, outs: test_device_ir(ins[0], ins[1], ins[2], outs[0]),
name="vadd",
dtype="float32",
)
s = te.create_schedule(D.op)
mod = run_passes(s, [A, B, C, D])
verify_single_allocation(mod["main"].body)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
fadd = tvm.build(s, [A, B, C, D], target)
dev = tvm.device(target, 0)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.random.uniform(size=n).astype(C.dtype), dev)
d = tvm.nd.array(np.zeros((n,), dtype=D.dtype), dev)
fadd(a, b, c, d)
tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + c.numpy(), 1e-4, 1e-4)
for target in ["cuda", "nvptx"]:
check_target(target)
def test_dyn_shared_more_dtype():
"""Test vectorized store into dynamic shared memory"""
n = 512
A = te.placeholder((n,), name="A", dtype="int8")
B = te.placeholder((n,), name="B", dtype="int16")
def test_device_ir(A, B, C):
n = A.shape[0]
ib = tvm.tir.ir_builder.create()
tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")
B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")
C_sh = ib.allocate(C.dtype, (n,), scope="shared.dyn")
Aptr = ib.buffer_ptr(A)
Bptr = ib.buffer_ptr(B)
Cptr = ib.buffer_ptr(C)
A_sh[tx] = Aptr[tx]
B_sh[tx] = Bptr[tx]
C_sh[tx] = cast(A_sh[tx], "int32") + cast(B_sh[tx], "int32")
Cptr[tx] = C_sh[tx]
return ib.get()
C = te.extern(
(n,),
[A, B],
lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
name="vadd",
dtype="int32",
)
s = te.create_schedule(C.op)
mod = run_passes(s, [A, B, C])
verify_single_allocation(mod["main"].body, n * 4)
def check_target(target):
if not tvm.testing.device_enabled(target):
return
fadd = tvm.build(s, [A, B, C], target)
dev = tvm.device(target, 0)
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4)
for target in ["cuda", "nvptx"]:
check_target(target)
if __name__ == "__main__":
test_matmul_dyn_shared()
test_dyn_shared_vectorized_store()
test_dyn_shared_reuse_and_merge()
test_dyn_shared_more_dtype() |
import tvm
from tvm import relay, te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.tir import const
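# Tests for tir.transform.NarrowDataType. The pass narrows index and loop-variable dtypes
# (for example int64 -> int32/int16) down to target_bits whenever the analyzed value range
# still fits; the check() helpers below assert the dtype chosen for loop variables, thread
# extents and ramp nodes after the rewrite.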
def lower_stmt(params, stmt, target_bits):
func = tvm.tir.PrimFunc(params, stmt)
func = tvm.tir.transform.NarrowDataType(target_bits)(tvm.IRModule.from_expr(func))["main"]
stmt = func.body
return stmt
def lower_sch(sch, args, target_bits, extra_passes=None):
binds = {}
arg_list = []
for x in args:
if isinstance(x, te.tensor.Tensor):
buf = tvm.tir.decl_buffer(x.shape, dtype=x.dtype, name=x.name)
assert x not in binds
binds[x] = buf
arg_list.append(buf)
else:
raise ValueError("args must be Tensor, Buffer or Var")
sch = sch.normalize()
mod = schedule_to_module(sch, args)
mod = tvm.tir.transform.StorageFlatten(64)(mod)
if extra_passes:
for p in extra_passes:
mod = p(mod)
return tvm.tir.transform.NarrowDataType(target_bits)(mod)["main"].body
def test_basic():
def check(m, n, target_bits, target_dtype):
ib = tvm.tir.ir_builder.create()
Ab = tvm.tir.decl_buffer([m * n], name="A")
A = ib.buffer_ptr(Ab)
Bb = tvm.tir.decl_buffer([m * n], name="B")
B = ib.buffer_ptr(Bb)
with ib.for_range(0, m, name="i") as i:
with ib.for_range(0, n, name="j") as j:
B[i * n + j] = A[i * n + j] + 1
stmt = ib.get()
stmt = lower_stmt([Ab, Bb], stmt, target_bits)
assert stmt.loop_var.dtype == target_dtype
assert stmt.body.loop_var.dtype == target_dtype
check(2, 2, 32, "int32")
check(const(2, dtype="int64"), const(2, dtype="int64"), 32, "int32")
check(const(2**16, dtype="int64"), const(2**16, dtype="int64"), 32, "int64")
check(2, 2, 16, "int16")
check(2**10, 2**10, 16, "int32")
check(te.size_var(name="m", dtype="int32"), te.size_var(name="n", dtype="int32"), 32, "int32")
check(te.size_var(name="m", dtype="int64"), te.size_var(name="n", dtype="int64"), 32, "int64")
def test_thread_axis():
    def check(m, n, target_bits, target_dtype):
ib = tvm.tir.ir_builder.create()
Ab = tvm.tir.decl_buffer([m * n], name="A")
A = ib.buffer_ptr(Ab)
Bb = tvm.tir.decl_buffer([m * n], name="B")
B = ib.buffer_ptr(Bb)
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", m)
ib.scope_attr(tx, "thread_extent", n)
B[bx * n + tx] = A[bx * n + tx] + 1
stmt = ib.get()
stmt = lower_stmt([Ab, Bb], stmt, target_bits)
assert stmt.node.var.dtype == target_dtype
assert stmt.body.node.var.dtype == target_dtype
check(2, 32, target_bits=32, target_dtype="int32")
check(const(2, dtype="int64"), const(32, dtype="int64"), target_bits=32, target_dtype="int32")
check(
const(2**30, dtype="int64"),
const(32, dtype="int64"),
target_bits=32,
target_dtype="int64",
)
check(2, 32, target_bits=16, target_dtype="int16")
check(2**14, 32, target_bits=16, target_dtype="int32")
def test_thread_axis_2():
@tvm.script.ir_module
class Before:
@T.prim_func
def main(T_reshape: T.Buffer[(1, 12, 384, 384), "float32"], placeholder_1: T.Buffer[(T.int64(1), T.int64(12), T.int64(384), 384), "bool"], T_where: T.Buffer[(T.int64(1), T.int64(12), T.int64(384), 384), "float32"]) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0_i1_i2_i3_fused_1 in T.thread_binding(T.int64(256), thread="blockIdx.x"):
for i0_i1_i2_i3_fused_2 in T.thread_binding(T.int64(1024), thread="threadIdx.x"):
for i0_i1_i2_i3_fused_0 in T.serial(T.int64(7)):
with T.block("T_where"):
ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                        ax1 = T.axis.spatial(T.int64(12), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(1769472) // T.int64(147456))
                        ax2 = T.axis.spatial(T.int64(384), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(147456) // T.int64(384))
ax3 = T.axis.spatial(384, T.cast(((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(384), "int32"))
T.where((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2 < T.int64(1769472))
T.reads(placeholder_1[ax0, ax1, ax2, ax3], T_reshape[ax0, ax1, ax2, ax3])
T.writes(T_where[ax0, ax1, ax2, ax3])
T_where[ax0, ax1, ax2, ax3] = T.Select(T.cast(placeholder_1[ax0, ax1, ax2, ax3], "int32") != 0, T.float32(-1000000000), T_reshape[ax0, ax1, ax2, ax3])
tvm.lower(Before)
def test_multilanes():
def check(m, lanes, target_bits, target_dtype):
ib = tvm.tir.ir_builder.create()
Ab = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="A")
A = ib.buffer_ptr(Ab)
Bb = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="B")
B = ib.buffer_ptr(Bb)
with ib.for_range(0, m, name="i", dtype=m.dtype) as i:
B[i] = A[i] + 1
A[0] = B[1]
stmt = ib.get()
stmt = lower_stmt([Ab, Bb], stmt, target_bits)
assert stmt.seq[0].loop_var.dtype == target_dtype
check(const(2**10, dtype="int32"), 2, target_bits=32, target_dtype="int32")
check(const(2**10, dtype="int64"), 2, target_bits=32, target_dtype="int32")
check(const(2**32, dtype="int64"), 2, target_bits=32, target_dtype="int64")
check(const(2**10, dtype="int32"), 2, target_bits=16, target_dtype="int16")
check(const(2**16, dtype="int32"), 2, target_bits=16, target_dtype="int32")
def test_reduce():
def check(m, target_bits, target_dtype):
A = te.placeholder((m,), name="A", dtype="float32") |
k = te.reduce_axis((0, m), "k")
B = te.compute((), lambda *idx: te.sum(A[k], axis=k), name="B")
s = te.create_schedule(B.op)
stmt = lower_sch(s, [A, B], target_bits)
assert stmt[1].loop_var.dtype == target_dtype
check(const(64, dtype="int32"), 32, "int32")
check(const(64, dtype="int64"), 32, "int32")
check(const(64, dtype="int32"), 16, "int16")
check(const(2**16, dtype="int32"), 16, "int32")
check(te.var("n", dtype="int32"), 32, "int32")
check(te.var("n", dtype="int64"), 32, "int64")
def test_slice():
def check(m, n, target_bits, target_dtype):
ib = tvm.tir.ir_builder.create()
Ab = tvm.tir.decl_buffer([m * n], name="A")
A = ib.buffer_ptr(Ab)
Bb = tvm.tir.decl_buffer([m * n * 2], name="B")
B = ib.buffer_ptr(Bb)
with ib.for_range(0, m, name="i") as i:
with ib.for_range(0, n, name="j") as j:
A[i * n + j] = B[i * 2 * n + 2 * j] + 1
stmt = ib.get()
stmt = lower_stmt([Ab, Bb], stmt, target_bits)
assert stmt.loop_var.dtype == target_dtype
assert stmt.body.loop_var.dtype == target_dtype
check(const(2**15, "int64"), const(2**15, "int64"), target_bits=32, target_dtype="int32")
check(
const(2**15, "int64"), const((2**15 + 1), "int64"), target_bits=32, target_dtype="int64"
)
def test_relay_basic():
engine = relay.backend.te_compiler.get()
def check(shapex, shapey, target_bits, target_dtype):
x = relay.var("x", shape=shapex)
y = relay.var("y", shape=shapey)
z = relay.add(x, y)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
func = mod["main"]
z = engine.lower(func, "llvm")
stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
assert stmt.loop_var.dtype == target_dtype
if len(shapex) > 1 or len(shapey) > 1: |
assert stmt.body.loop_var.dtype == target_dtype
check(
(const(2**16, "int64"), const(2**15 + 1, "int64")),
(1, const(2**15 + 1, "int64")),
target_bits=32,
target_dtype="int64",
)
check(
(const(2**16, "int64"), const(2**15, "int64")),
(1, const(2**15, "int64")),
target_bits=32,
target_dtype="int32",
)
check(
(const(2**31, "int64"),), (const(2**31, "int64"),), target_bits=32, target_dtype="int32"
)
check(
(const(2**31 + 1, "int64"),),
(const(2**31 + 1, "int64"),),
target_bits=32,
target_dtype="int64",
)
def test_relay_take():
engine = relay.backend.te_compiler.get()
def check(shape, index, target_bits, target_dtype):
x = relay.var("x", shape=shape)
y = relay.op.take(x, indices=index)
func = relay.Function([x], y)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
func = mod["main"]
z = engine.lower(func, "llvm")
stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
assert stmt.value.indices[0].dtype == target_dtype
check(
(const(2**16, "int64"), const(2**15 + 1, "int64")),
relay.const(0, dtype="int64"),
target_bits=32,
target_dtype="int32",
)
check(
(const(2**16, "int64"), const(2**15 + 1, "int64")),
relay.const(2**31, dtype="int64"),
target_bits=32,
target_dtype="int64",
)
def test_ramp_dtype_consistency():
"""
for (i :int64, (int64)0, (int64)4) {
A[ramp(i*(int64)2, (int64)1, 2)] = cast(int64, 2 ** 31 - 1) * i;
}
The infer result:
base: int64 -> int64 (since i is involved in another int64 expr)
stride: int64 -> int32
Thus ramp should still use int64 for both stride and base after rewrite.
"""
n = tvm.tir.IntImm("int64", 4)
m = tvm.tir.IntImm("int64", 2)
    A = te.compute((n, m), lambda i, j: tvm.tir.Cast("int64", 2**31 - 1) * i, name="A")
    s = te.create_schedule(A.op)
    # Vectorize the inner axis so the store becomes a ramp, per the docstring above.
    s[A].vectorize(A.op.axis[1])
    lower_sch(s, [A], 32, extra_passes=[tvm.tir.transform.VectorizeLoop()])