* 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61088
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 384)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61056
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 416)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 61024
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 448)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60992
),
],
T.float16(0),
dtype="float16",
)
T.launch_thread(tx, 32)
Apad_shared[
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 480)
] = T.if_then_else(
(
(
(
(1 <= (T.floordiv(bz, 14) + kh))
and ((T.floordiv(bz, 14) + kh) < 15)
)
and (1 <= (ax2 + T.floormod(bz, 14)))
)
and ((ax2 + T.floormod(bz, 14)) < 15)
),
A_1[
(
(
(
(
(
(
(
((bx * 6422528) + (ty * 1605632))
+ (tz * 802816)
)
+ (kh * 57344)
)
+ (bz * 4096)
)
+ (ax2 * 4096)
)
+ (ic_outer * 512)
)
+ tx
)
- 60960
),
],
T.float16(0),
dtype="float16",
)
with T.launch_thread(tx, 32):
W_shared[T.ramp((((ty * 512) + (tz * 256)) + (tx * 8)), 1, 8)] = W_1[
T.ramp(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 2048), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 8192
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 4096), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 131072
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 6144), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 139264
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 8192), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 262144
),
1,
8,
)
]
with T.launch_thread(tx, 32):
W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 10240), 1, 8)] = W_1[
T.ramp(
(
(
(
(
(((kh * 393216) + (ic_outer * 16384)) + (by * 2048))
+ (ty * 512)
)
+ (tz * 256)
)
+ (tx * 8)
)
+ 270336
),
1,
8,
)
]
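# Inner reduction (lowered form): for each (ic_inner, kw) the Apad/W shared-memory tiles
# are loaded into wmma fragments with tvm_load_matrix_sync, combined by eight tvm_mma_sync
# calls, and the accumulators are written back afterwards with tvm_store_matrix_sync.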
for ic_inner in T.serial(0, 2):
for kw in T.serial(0, 3):
T.evaluate(
T.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
Apad_shared.data,
(((ty * 3072) + (kw * 512)) + (ic_inner * 256)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
Apad_shared_wmma_matrix_a.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
Apad_shared.data,
((((ty * 3072) + (kw * 512)) + (ic_inner * 256)) + 1536),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
(((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 256),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
2,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 512),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_load_matrix_sync(
W_shared_wmma_matrix_b.data,
16,
16,
16,
3,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
W_shared.data,
((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 768),
256,
1,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
0,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
0,
Conv_wmma_accumulator.data,
0,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
1,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
1,
Conv_wmma_accumulator.data,
1,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
2,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
2,
Conv_wmma_accumulator.data,
2,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
3,
Apad_shared_wmma_matrix_a.data,
0,
W_shared_wmma_matrix_b.data,
3,
Conv_wmma_accumulator.data,
3,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
4,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
0,
Conv_wmma_accumulator.data,
4,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
5,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
1,
Conv_wmma_accumulator.data,
5,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
6,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
2,
Conv_wmma_accumulator.data,
6,
dtype="handle",
)
)
T.evaluate(
T.tvm_mma_sync(
Conv_wmma_accumulator.data,
7,
Apad_shared_wmma_matrix_a.data,
1,
W_shared_wmma_matrix_b.data,
3,
Conv_wmma_accumulator.data,
7,
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
0,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
1,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 256
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
2,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 512
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
3,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 768
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
4,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605632
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
5,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1605888
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
6,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606144
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
T.evaluate(
T.tvm_store_matrix_sync(
Conv_wmma_accumulator.data,
16,
16,
16,
7,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
Conv_1.data,
(
(
((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048))
+ (tz * 1024)
)
+ 1606400
),
256,
2,
dtype="handle",
),
16,
"row_major",
dtype="handle",
)
)
return func
def opt_conv_tensorcore_mod_host():
@T.prim_func
def opt_conv_tensorcore_mod_host(
args: T.handle,
arg_type_ids: T.Buffer[(3,), "int32"],
num_args: T.int32,
out_ret_value: T.handle,
out_ret_tcode: T.handle,
resource_handle: T.handle,
) -> T.int32:
T.func_attr(
{
"tir.noalias": Tru |
e,
"global_symbol": "default_function",
"tir.is_entry_func": True,
"calling_conv": 1,
}
)
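# Host-side entry: the packed-func wrapper below validates the three DLTensor arguments
# (pointer type codes, ndim, dtype, shape, strides, byte_offset, device), sets the device,
# and then launches default_function_kernel0 through tvm_call_packed_lowered.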
stack_tcode_data: T.Ptr[T.int32] = T.tvm_stack_alloca("arg_tcode", 10, dtype="handle")
stack_tcode = T.buffer_decl([9], "int32", data=stack_tcode_data)
stack_value: T.handle = T.tvm_stack_alloca("arg_value", 10, dtype="handle")
assert num_args == 3, "default_function: num_args should be 3"
arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle")
arg0_code: T.int32 = arg_type_ids[0]
arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle")
arg1_code: T.int32 = arg_type_ids[1]
arg2: T.handle = T.tvm_struct_get(args, 2, 12, dtype="handle")
arg2_code: T.int32 = arg_type_ids[2]
A: T.handle = T.tvm_struct_get(arg0, 0, 1, dtype="handle")
T.attr(A, "storage_alignment", 128)
arg0_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg0, 0, 2, dtype="handle")
arg0_shape = T.buffer_decl([6], "int64", data=arg0_shape_data)
arg0_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg0, 0, 3, dtype="handle")
arg0_strides = T.buffer_decl([6], "int64", data=arg0_strides_data)
dev_id: T.int32 = T.tvm_struct_get(arg0, 0, 9, dtype="int32")
W: T.handle = T.tvm_struct_get(arg1, 0, 1, dtype="handle")
T.attr(W, "storage_alignment", 128)
arg1_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg1, 0, 2, dtype="handle")
arg1_shape = T.buffer_decl([6], "int64", data=arg1_shape_data)
arg1_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg1, 0, 3, dtype="handle")
arg1_strides = T.buffer_decl([6], "int64", data=arg1_strides_data)
Conv: T.handle = T.tvm_struct_get(arg2, 0, 1, dtype="handle")
T.attr(Conv, "storage_alignment", 128)
arg2_shape_data: T.Ptr[T.int64] = T.tvm_struct_get(arg2, 0, 2, dtype="handle")
arg2_shape = T.buffer_decl([6], "int64", data=arg2_shape_data)
arg2_strides_data: T.Ptr[T.int64] = T.tvm_struct_get(arg2, 0, 3, dtype="handle")
arg2_strides = T.buffer_decl([6], "int64", data=arg2_strides_data)
assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or (
arg0_code == 4
), "default_function: Expect arg[0] to be pointer"
assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or (
arg1_code == 4
), "default_function: Expect arg[1] to be pointer"
assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or (
arg2_code == 4
), "default_function: Expect arg[2] to be pointer"
assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6"
assert (
(T.tvm_struct_get(arg0, 0, 5, dtype="uint8") == T.uint8(2))
and (T.tvm_struct_get(arg0, 0, 6, dtype="uint8") == T.uint8(16))
) and (
T.tvm_struct_get(arg0, 0, 7, dtype="uint16") == T.uint16(1)
), "arg0.dtype is expected to be float16"
assert 16 == T.cast(
arg0_shape[0], "int32"
), "Argument arg0.shape[0] has an unsatisfied constraint"
assert 14 == T.cast(
arg0_shape[1], "int32"
), "Argument arg0.shape[1] has an unsatisfied constraint"
assert 14 == T.cast(
arg0_shape[2], "int32"
), "Argument arg0.shape[2] has an unsatisfied constraint"
assert 16 == T.cast(
arg0_shape[3], "int32"
), "Argument arg0.shape[3] has an unsatisfied constraint"
assert 16 == T.cast(
arg0_shape[4], "int32"
), "Argument arg0.shape[4] has an unsatisfied constraint"
assert 16 == T.cast(
arg0_shape[5], "int32"
), "Argument arg0.shape[5] has an unsatisfied constraint"
if not (T.isnullptr(arg0_strides.data, dtype="bool")):
assert (
(
(
(
(1 == T.cast(arg0_strides[5], "int32"))
and (16 == T.cast(arg0_strides[4], "int32"))
)
and (256 == T.cast(arg0_strides[3], "int32"))
)
and (4096 == T.cast(arg0_strides[2], "int32"))
)
and (57344 == T.cast(arg0_strides[1], "int32"))
) and (
802816 == T.cast(arg0_strides[0], "int32")
), "arg0.strides: expected to be compact array"
T.evaluate(0)
assert T.uint64(0) == T.tvm_struct_get(
arg0, 0, 8, dtype="uint64"
), "Argument arg0.byte_offset has an unsatisfied constraint"
assert 2 == T.tvm_struct_get(
arg0, 0, 10, dtype="int32"
), "Argument arg0.device_type has an unsatisfied constraint"
assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6"
assert (
(T.tvm_struct_get(arg1, 0, 5, dtype="uint8") == T.uint8(2))
and (T.tvm_struct_get(arg1, 0, 6, dtype="uint8") == T.uint8(16))
) and (
T.tvm_struct_get(arg1, 0, 7, dtype="uint16") == T.uint16(1)
), "arg1.dtype is expected to be float16"
assert 3 == T.cast(
arg1_shape[0], "int32"
), "Argument arg1.shape[0] has an unsatisfied constraint"
assert 3 == T.cast(
arg1_shape[1], "int32"
), "Argument arg1.shape[1] has an unsatisfied constraint"
assert 16 == T.cast(
arg1_shape[2], "int32"
), "Argument arg1.shape[2] has an unsatisfied constraint"
assert 32 == T.cast(
arg1_shape[3], "int32"
), "Argument arg1.shape[3] has an unsatisfied constraint"
assert 16 == T.cast(
arg1_shape[4], "int32"
), "Argument arg1.s |
hape[4] has an unsatisfied constraint"
assert 16 == T.cast(
arg1_shape[5], "int32"
), "Argument arg1.shape[5] has an unsatisfied constraint"
if not (T.isnullptr(arg1_strides.data, dtype="bool")):
assert (
(
(
(
(1 == T.cast(arg1_strides[5], "int32"))
and (16 == T.cast(arg1_strides[4], "int32"))
)
and (256 == T.cast(arg1_strides[3], "int32"))
)
and (8192 == T.cast(arg1_strides[2], "int32"))
)
and (131072 == T.cast(arg1_strides[1], "int32"))
) and (
393216 == T.cast(arg1_strides[0], "int32")
), "arg1.strides: expected to be compact array"
T.evaluate(0)
assert T.uint64(0) == T.tvm_struct_get(
arg1, 0, 8, dtype="uint64"
), "Argument arg1.byte_offset has an unsatisfied constraint"
assert 2 == T.tvm_struct_get(
arg1, 0, 10, dtype="int32"
), "Argument arg1.device_type has an unsatisfied constraint"
assert dev_id == T.tvm_struct_get(
arg1, 0, 9, dtype="int32"
), "Argument arg1.device_id has an unsatisfied constraint"
assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6"
assert (
(T.tvm_struct_get(arg2, 0, 5, dtype="uint8") == T.uint8(2))
and (T.tvm_struct_get(arg2, 0, 6, dtype="uint8") == T.uint8(32))
) and (
T.tvm_struct_get(arg2, 0, 7, dtype="uint16") == T.uint16(1)
), "arg2.dtype is expected to be float32"
assert 16 == T.cast(
arg2_shape[0], "int32"
), "Argument arg2.shape[0] has an unsatisfied constraint"
assert 14 == T.cast(
arg2_shape[1], "int32"
), "Argument arg2.shape[1] has an unsatisfied constraint"
assert 14 == T.cast(
arg2_shape[2], "int32"
), "Argument arg2.shape[2] has an unsatisfied constraint"
assert 32 == T.cast(
arg2_shape[3], "int32"
), "Argument arg2.shape[3] has an unsatisfied constraint"
assert 16 == T.cast(
arg2_shape[4], "int32"
), "Argument arg2.shape[4] has an unsatisfied constraint"
assert 16 == T.cast(
arg2_shape[5], "int32"
), "Argument arg2.shape[5] has an unsatisfied constraint"
if not (T.isnullptr(arg2_strides.data, dtype="bool")):
assert (
(
(
(
(1 == T.cast(arg2_strides[5], "int32"))
and (16 == T.cast(arg2_strides[4], "int32"))
)
and (256 == T.cast(arg2_strides[3], "int32"))
)
and (8192 == T.cast(arg2_strides[2], "int32"))
)
and (114688 == T.cast(arg2_strides[1], "int32"))
) and (
1605632 == T.cast(arg2_strides[0], "int32")
), "arg2.strides: expected to be compact array"
T.evaluate(0)
assert T.uint64(0) == T.tvm_struct_get(
arg2, 0, 8, dtype="uint64"
), "Argument arg2.byte_offset has an unsatisfied constraint"
assert 2 == T.tvm_struct_get(
arg2, 0, 10, dtype="int32"
), "Argument arg2.device_type has an unsatisfied constraint"
assert dev_id == T.tvm_struct_get(
arg2, 0, 9, dtype="int32"
), "Argument arg2.device_id has an unsatisfied constraint"
T.evaluate(T.tvm_struct_set(stack_value, 0, 12, T.cast(2, "int64"), dtype="int32"))
stack_tcode[0] = 0
T.evaluate(T.tvm_struct_set(stack_value, 1, 12, T.cast(dev_id, "int64"), dtype="int32"))
stack_tcode[1] = 0
T.evaluate(
T.tvm_call_packed_lowered(
"__tvm_set_device", stack_value, stack_tcode.data, 0, 2, dtype="int32"
)
)
T.attr(0, "compute_scope", "default_function_compute_")
T.evaluate(T.tvm_struct_set(stack_value, 0, 12, A, dtype="int32"))
stack_tcode[0] = 3
T.evaluate(T.tvm_struct_set(stack_value, 1, 12, W, dtype="int32"))
stack_tcode[1] = 3
T.evaluate(T.tvm_struct_set(stack_value, 2, 12, Conv, dtype="int32"))
stack_tcode[2] = 3
T.evaluate(T.tvm_struct_set(stack_value, 3, 12, T.cast(196, "int64"), dtype="int32"))
stack_tcode[3] = 0
T.evaluate(T.tvm_struct_set(stack_value, 4, 12, T.cast(2, "int64"), dtype="int32"))
stack_tcode[4] = 0
T.evaluate(T.tvm_struct_set(stack_value, 5, 12, T.cast(4, "int64"), dtype="int32"))
stack_tcode[5] = 0
T.evaluate(T.tvm_struct_set(stack_value, 6, 12, T.cast(4, "int64"), dtype="int32"))
stack_tcode[6] = 0
T.evaluate(T.tvm_struct_set(stack_value, 7, 12, T.cast(2, "int64"), dtype="int32"))
stack_tcode[7] = 0
T.evaluate(T.tvm_struct_set(stack_value, 8, 12, T.cast(32, "int64"), dtype="int32"))
stack_tcode[8] = 0
T.evaluate(
T.tvm_call_packed_lowered(
"default_function_kernel0", stack_value, stack_tcode.data, 0, 9, dtype="int32"
)
)
return opt_conv_tensorcore_mod_host
def vthread_func():
@T.prim_func
def vthread_func(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [256], "float32")
C = T.match_buffer(c, [256], "float32")
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B_data = T.allocate([16], "float32", "local")
B = T.buffer_decl(shape=[16], dtype="float32", scope="local", data=B_data)
for j in range(16):
B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + T.float32(1)
for j in range(16):
C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * T.float32(2)
return vthread_func
def matmul():
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j, k in T.grid(128, 128, 128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = T.float32(0)
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
return matmul
def matmul_original():
@T.prim_func
def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i, j in T.grid(128, 128):
with T.block("init"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = T.float32(0)
for k in range(128):
with T.block("update"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
return matmul_original
def element_wise():
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
return element_wise
def predicate():
@T.prim_func
def predicate(b: T.handle, c: T.handle) -> None:
B = T.match_buffer(b, (16, 16), "float32")
C = T.match_buffer(c, (16, 16), "float32")
for i, jo, ji in T.grid(16, 4, 5):
with T.block("update"):
vi = T.axis.S(16, i)
vj = T.axis.S(16, jo * 4 + ji)
T.where(jo * 4 + ji < 16)
C[vi, vj] = B[vi, vj] + T.float32(1)
return predicate
def test_module_define():
func1 = tvm.ir.IRModule({"matmul": matmul()})["matmul"]
func2 = tvm.ir.IRModule({"element_wise": element_wise()})["element_wise"]
func3 = tvm.ir.IRModule({"predicate": predicate()})["predicate"]
mod1 = tvm.ir.IRModule({"func1": func1, "func2": func2, "func3": func3})
mod2 = tvm.ir.IRModule({"func1": matmul(), "func2": element_wise(), "func3": predicate()})
tvm.ir.assert_structural_equal(mod1, mod2)
def test_matmul_original():
func = matmul_original()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body, tir.stmt.SeqStmt)
assert isinstance(rt_func.body.block.body.body.body[0].block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body.body.body[1], tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body[1].body.block, tir.stmt.Block)
def test_element_wise():
func = element_wise()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.SeqStmt)
assert isinstance(rt_func.body.block.body[0], tir.stmt.For)
assert isinstance(rt_func.body.block.body[0].body, tir.stmt.For)
assert isinstance(rt_func.body.block.body[0].body.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body[1], tir.stmt.For)
assert isinstance(rt_func.body.block.body[1].body, tir.stmt.For)
assert isinstance(rt_func.body.block.body[1].body.body.block, tir.stmt.Block)
def test_predicate():
func = predicate()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body, tir.stmt.For)
assert isinstance(rt_func.body.block.body.body.body.body.block, tir.stmt.Block)
def for_thread_binding():
@T.prim_func
def for_thread_binding(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (16, 16), "float32")
for i in T.thread_binding(0, 16, thread="threadIdx.x"):
for j in T.thread_binding(
0, 16, thread="threadIdx.y", annotations={"attr_key": "attr_value"}
):
A[i, j] = B[i, j] + T.float32(1)
return for_thread_binding
def test_for_thread_binding():
func = for_thread_binding()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body, tir.stmt.For)
assert rt_func.body.kind == 4
assert rt_func.body.thread_binding.thread_tag == "threadIdx.x"
assert isinstance(rt_func.body.body, tir.stmt.For)
assert rt_func.body.body.kind == 4
assert rt_func.body.body.thread_binding.thread_tag == "threadIdx.y"
assert rt_func.body.body.annotations["attr_key"] == "attr_value"
def match_buffer_region():
@T.prim_func
def match_buffer_region(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16, 16), "float32")
B = T.match_buffer(b, (1), "float32")
for i, j in T.grid(16, 4):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
C = T.match_buffer(A[0:16, vi, vj * 4 : vj * 4 + 4], (16, 1, 4))
for ii in range(4):
with T.block():
vii = T.axis.S(4, ii)
D = T.match_buffer(C[vii * 4 : vii * 4 + 4, 0, 0:4], (4, 1, 4))
for i, j in T.grid(4, 4):
B[0] += D[i, 0, j]
return match_buffer_region
def test_match_buffer_region():
func = match_buffer_region()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body, tir.stmt.BlockRealize)
root = rt_func.body.block
assert isinstance(root.body, tir.stmt.For)
assert isinstance(root.body.body, tir.stmt.For)
assert isinstance(root.body.body.body, tir.stmt.BlockRealize)
outer_block = root.body.body.body.block
assert len(outer_block.match_buffers) == 1
buffer_C = outer_block.match_buffers[0].buffer
tvm.ir.assert_structural_equal(buffer_C.shape, [16, 1, 4])
assert isinstance(outer_block.body, tir.stmt.For)
assert isinstance(outer_block.body.body, tir.stmt.BlockRealize)
inner_block = outer_block.body.body.block
assert len(inner_block.match_buffers) == 1
buffer_D = inner_block.match_buffers[0].buffer
tvm.ir.assert_structural_equal(buffer_D.shape, [4, 1, 4])
def block_elements():
@T.prim_func
def block_elements(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (1, 1), "float32")
with T.block("update"):
vi = T.axis.S(1, 0)
T.where(True)
T.reads(A[0:16, 0:16])
T.writes(B[0, 0])
T.block_attr({"attr_key": "attr_value"})
C = T.alloc_buffer((4, 4), dtype="float32")
D = T.match_buffer(A[0:4, 0], (4, 1))
with T.init():
B[0, 0] = T.float32(0)
B[0, 0] = A[0, 0] + B[0, 0] + C[1, 1] + D[2, 0]
return block_elements
def test_block_elements():
func = block_elements()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
assert isinstance(rt_func.body.block, tir.stmt.Block)
assert isinstance(rt_func.body.block.body, tir.stmt.BlockRealize)
assert isinstance(rt_func.body.block.body.block, tir.stmt.Block)
block = rt_func.body.block.body.block
assert isinstance(block.body, tir.stmt.BufferStore)
assert isinstance(block.init, tir.stmt.BufferStore)
assert len(block.annotations) == 1
assert block.annotations["attr_key"] == "attr_value"
def opaque_block():
@T.prim_func
def opaque_block(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
B = T.match_buffer(b, (16, 16), "float32")
for i in range(16):
for j in range(16):
with T.block():
T.reads([])
T.writes(A[i, j])
A[i, j] = T.float32(0)
with T.block():
T.reads([A[i, 0:16]])
T.writes([B[i, 0:16]])
for j in range(16):
B[i, j] = A[i, j]
return opaque_block
def test_opaque_block():
func = opaque_block()
rt_func = tvm.script.from_source(func.script(show_meta=True))
tvm.ir.assert_structural_equal(func, rt_func)
root_block = rt_func.body.block
assert isinstance(root_block, tir.stmt.Block)
assert isinstance(root_block.body, tir.stmt.For)
assert isinstance(root_block.body.body[0], tir.stmt.For)
assert isinstance(root_block.body.body[0].body, tir.stmt.BlockRealize)
assert isinstance(root_block.body.body[0].body.block, tir.stmt.Block)
assert len(root_block.body.body[0].body.block.iter_vars) == 0
assert isinstance(root_block.body.body[1], tir.stmt.BlockRealize)
assert isinstance(root_block.body.body[1].block, tir.stmt.Block)
assert len(root_block.body.body[1].block.iter_vars) == 0
def module_const():
@tvm.script.ir_module
class Module4:
"""
@T.prim_func
def A(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
C = T.match_buffer(c, (10), "int32")
B = T.alloc_buffer((10), "int32")
K1 = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
for x in T.serial(0, 10):
B[x] = A[x] + T.load("int32", K1, x)
for x in T.serial(0, 10):
C[x] = B[x]
"""
@T.prim_func
def B(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
C = T.match_buffer(c, (10), "int32")
B = T.alloc_buffer((10), "int32")
K1_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K1 = T.buffer_decl(shape=[10], dtype="int32", data=K1_data)
for x in T.serial(0, 10):
B[x] = A[x] + K1[x]
K2_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K2 = T.buffer_decl(shape=[10], dtype="int32", data=K2_data)
for x in T.serial(0, 10):
B[x] = B[x] + K2[x]
for x in T.serial(0, 10):
C[x] = B[x]
return Module4
def constant():
@T.prim_func
def constant(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (10), "int32")
C = T.match_buffer(c, (10), "int32")
B = T.alloc_buffer((10), "int32")
K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
K = T.buffer_decl(shape=[10], dtype="int32", data=K_data)
for x in T.serial(0, 10):
B[x] = A[x] + K[x]
for x in T.serial(0, 10):
C[x] = B[x]
return constant
def rank0():
@T.prim_func
def rank0(a: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
B = T.alloc_buffer((), "float32")
A[()] = 2
B[()] = A[()]
return rank0
def rank0_block():
@T.prim_func
def rank0_block(a: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
B = T.alloc_buffer((), "float32")
B[()] = A[()]
with T.block("update"):
T.reads([A[()]])
T.writes([B[()]])
for i in range(1):
B[()] = A[()]
return rank0_block
def select():
@T.prim_func
def select(a: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
A[()] = T.Select(True, 1, 2)
return select
def minmax():
@T.prim_func
def minmax(a: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
A[()] = T.min(1, 2)
A[()] = T.max(1, 2)
return minmax
def abs():
@T.prim_func
def abs(a: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("A"):
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = T.abs(A[vi, vj])
return abs
def constant_folding():
@T.prim_func
def constant_folding(a: T.handle) -> None:
A = T.match_buffer(a, (), "float32")
A[()] = T.min(2.2, 5.2)
A[()] = T.max(T.float32(2.2), T.float32(T.float32(5.2)))
A[()] = T.min(2.2, 5.0)
return constant_folding
def simplify_bracket():
@T.prim_func
def simplify_bracket() -> None:
a = T.var("int32")
b = T.var("int32")
c = T.var("int32")
d = T.var("int32")
T.evaluate(a + b * (c + d))
return simplify_bracket
def var_with_same_name():
@T.prim_func
def var_with_same_name(a: T.handle) -> None:
A = T.match_buffer(a, (16, 16), "float32")
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = 0
for i, j in T.grid(16, 16):
with T.block():
vi, vj = T.axis.remap("SS", [i, j])
A[vi, vj] = 0
return var_with_same_name
def test_same_name_var():
func = var_with_same_name()
out_str = func.script(tir_prefix="T", show_meta=True)
rt_func = tvm.script.from_source(out_str)
tvm.ir.assert_structural_equal(func, rt_func)
assert out_str.count('vi, vj = T.axis.remap("SS", [i, j])') == 2
assert out_str.find("vi_") == -1
assert out_str.find("vj_") == -1
assert out_str.count("for i, j in T.grid(16, 16)") == 2
assert out_str.find("i_") == -1
assert out_str.find("i_") == -1
def while_loop():
@T.prim_func
def while_loop(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, (16,), "float32")
B = T.match_buffer(b, (16,), "float32")
i = T.alloc_buffer((), "int32", scope="local")
for ii in range(16):
with T.block():
vi = T.axis.S(16, ii)
B[vi] = 0
while i[()] < 10:
for j in range(16):
B[j] += A[j]
return while_loop
def primfunc_with_allocate_annotations():
@T.prim_func
def primfunc_with_allocate_annotations(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
tensor_2_data = T.allocate([200704], "uint8", "global", annotations={"attr1_key": "attr1_value"})
tensor_2 = T.buffer_decl(shape=[200704], dtype="uint8", scope="global", data=tensor_2_data)
for ax0_ax1_fused_4 in T.serial(0, 56):
for ax2_4 in T.serial(0, 56):
for ax3_init in T.serial(0, 64):
tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
for ax0_ax1_fused_5 in T.serial(0, 56):
for ax2_5, ax3_3 in T.grid(56, 64):
T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
return primfunc_with_allocate_annotations
def comm_reducer_single_reduce_group():
@T.prim_func
def comm_reducer_single_reduce_group(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
A = T.match_buffer(a, [16384], dtype="float32")
for i in T.serial(0, 128):
T.launch_thread(threadIdx_x, 128)
reduce_temp0_data = T.allocate([1], "float32", "local")
reduce_temp0 = T.buffer_decl(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
with T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))
return comm_reducer_single_reduce_group
def comm_reducer_multiple_reduce_groups():
@T.prim_func
def comm_reducer_multiple_reduce_groups(a: T.handle, b: T.handle) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
threadIdx_x = T.env_thread("threadIdx.x")
A = T.match_buffer(a, [16384], dtype="float32")
for i in T.serial(0, 128):
T.launch_thread(threadIdx_x, 128)
reduce_temp0_data = T.allocate([1], "float32", "local")
reduce_temp0 = T.buffer_decl(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
with T.attr(T.comm_reducer(lambda x0, x1, y0, y1: (T.Select((x1 >= y1), x0, y0), T.Select((x1 >= y1), x1, y1)), [T.int32(-1), T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))
return comm_reducer_multiple_reduce_groups
def multiple_commreducer():
@T.prim_func
def multiple_commreducer() -> None:
normal_reduce_temp0 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp1 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
reduce_temp0 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
reduce_temp1 = T.buffer_decl([1], dtype="float32", strides=[1], scope="local")
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_maxelem_cross_thread_reduction"):
T.attr(T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp0[0], True, reduce_temp0.data, ax0_1, dtype="handle"))
for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("T_softmax_expsum_cross_thread_reduction"):
T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp1[0], True, reduce_temp1.data, ax0_1, dtype="handle"))
return multiple_commreducer
def func_div_mod():
@T.prim_func
def func_div_mod():
a = T.var("int32")
b = T.var("int32")
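# `//` and `%` on TIR expressions parse to tir.FloorDiv and tir.FloorMod, while
# T.truncmod produces the truncated tir.Mod; test_div_mod below checks all three.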
T.evaluate(a // b)
T.evaluate(a % b)
T.evaluate(T.truncmod(a, b))
return func_div_mod
def test_div_mod():
func = func_div_mod()
rt_func = tvm.script.from_source(func.script())
tvm.ir.assert_structural_equal(func, rt_func, True)
assert isinstance(func.body[0].value, tvm.tir.FloorDiv)
assert isinstance(func.body[1].value, tvm.tir.FloorMod)
assert isinstance(func.body[2].value, tvm.tir.Mod)
def loop_extent_dependent():
@T.prim_func
def loop_extent_dependent(a: T.handle) -> None:
A = T.match_buffer(a, [], dtype="int32")
for i in T.serial(0, 128):
for j in T.serial(0, i):
A[()] = A[()] + j
return loop_extent_dependent
def nontrivial_range_axis():
@T.prim_func
def nontrivial_range_axis(a: T.handle) -> None:
A = T.match_buffer(a, (10), "float32")
for i in range(10):
with T.block("block"):
vi = T.axis.spatial((1, 11), i + 1)
A[vi - 1] = A[vi - 1] + 1.0
return nontrivial_range_axis
def func_with_target_spec_by_config():
@T.prim_func
def func_with_target_spec_by_config() -> None:
T.func_attr(
{
"kTarget": T.target(
{
"max_num_threads": 1024,
"arch": "sm_70",
"thread_warp_size": 32,
"kind": "cuda",
"tag": "",
"keys": ["cuda", "gpu"],
"host": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}),
}
)
}
)
T.evaluate(0)
return func_with_target_spec_by_config
def func_with_target_spec_by_str():
@T.prim_func
def func_with_target_spec_by_str() -> None:
T.func_attr({"kTarget": T.target("nvidia/nvidia-a100")})
T.evaluate(0)
return func_with_target_spec_by_str
def func_root_attr():
@T.prim_func
def func_root_attr():
with T.block("root"): |
T.block_attr({"a": "0"})
T.evaluate(0)
return func_root_attr
def func_trivial_root_block():
@T.prim_func
def func(A: T.Buffer[1, "int32"]):
with T.block("root"):
A[0] = 0
return func
def func_nested_root_block():
@T.prim_func
def func(A: T.Buffer[1, "int32"]):
with T.block("root"):
with T.block("block"):
A[0] = 0
return func
def func_T_ptr_let_statement():
@T.prim_func
def func_T_ptr_let_statement(
args: T.handle, arg_type_ids_handle: T.Ptr[T.int32], num_args: T.int32
) -> None:
arg_type_ids = T.buffer_decl([2], dtype="int32", data=arg_type_ids_handle)
arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle")
arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle")
A_data: T.Ptr[T.float32] = T.tvm_struct_get(arg0, 0, 1, dtype="handle")
A = T.buffer_decl([1024], dtype="float32", data=A_data)
B_data: T.Ptr[T.float32] = T.tvm_struct_get(arg1, 0, 1, dtype="handle")
B = T.buffer_decl([1024], dtype="float32", data=B_data)
B[0] = A[0]
return func_T_ptr_let_statement
def func_T_ptr_allocate():
@T.prim_func
def func_T_ptr_allocate() -> None:
A_data = T.allocate([1024], "float32", "global")
A = T.buffer_decl(shape=[1024], dtype="float32", scope="global", data=A_data)
A[0] = 0.0
return func_T_ptr_allocate
def llvm_intrin_call():
@T.prim_func
def ctpop(A: T.Buffer[(16,), "uint8"], B: T.Buffer[(16,), "uint8"]) -> None:
for i in range(0, 16):
with T.block("A"):
vi = T.axis.remap(
"S",
[
i,
],
)
B[vi] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.ctpop.i8"),
T.uint32(1),
A[vi],
dtype="uint8",
)
return ctpop
def parse_bufferslice_as_range_bound():
@T.prim_func
def segment_sum(
A_ptr: T.handle, B_ptr: T.handle, indptr_ptr: T.handle, n: T.int32, m: T.int32
) -> None:
A = T.match_buffer(A_ptr, [m], dtype="float32")
B = T.match_buffer(B_ptr, [n], dtype="float32")
indptr = T.match_buffer(indptr_ptr, [n + 1], dtype="int32")
for i in T.serial(n):
with T.block("outer"):
vi = T.axis.spatial(n, i)
T.reads(indptr[i : i + 2], B[vi], A[indptr[i] : indptr[i + 1]])
T.writes(B[vi])
for j in T.serial(indptr[i], indptr[i + 1]):
with T.block("inner"):
vj = T.axis.reduce(m, j)
T.reads(B[vi], A[vj])
T.writes(B[vi])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vj]
return segment_sum
def int64_support():
@T.prim_func
def elementwise_shape_int64(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32")
B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
return elementwise_shape_int64
def string_annotation_escaping():
@T.prim_func
def string_annotation_of_special_chars():
T.func_attr(
{
"key1": '"\'hello\t\r"',
"key2": """
%1 = add i32 %0, %0
%2 = add i32 %0, %1
%3 = add i32 %1, %2
""",
}
)
T.evaluate(0)
return string_annotation_of_special_chars
def pointer_type():
@T.prim_func
def func_with_ptr_type_annotations(x: T.Ptr[T.int32], y: T.Ptr[T.int32, "shared"]):
xx_data = T.allocate([16], "int32", "global")
xx = T.buffer_decl(shape=[16], dtype="int32", scope="global", data=xx_data)
yy_data = T.allocate([16], "int32", "shared")
yy = T.buffer_decl(shape=[16], dtype="int32", scope="shared", data=yy_data)
a: T.Ptr[T.int32] = T.address_of(xx[0], dtype="handle")
b: T.Ptr[T.int32, "shared"] = T.address_of(yy[0], dtype="handle")
T.evaluate(T.call_extern("copy", a, b, dtype=""))
return func_with_ptr_type_annotations
def buffer_axis_separator():
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128, 128), "float32", axis_separators=[1])
C = T.match_buffer(c, (128, 128), "float32")
B = T.alloc_buffer((128, 128), "float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
return element_wise
def buffer_ramp_access_as_slice_index():
@T.prim_func
def buffer_ramp_access(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (128,), "float32")
B = T.match_buffer(b, (128,), "float32")
C = T.match_buffer(c, (128,), "float32")
for i in range(128):
A[i : i + 1 : 1] = i
for i in range(4):
B[i * 32 : i * 32 + 32] = A[i * 32 : i * 32 + 32 : 1] + T.broadcast(1.0, 32)
for i in range(4):
C[i : i + 128 : 4] = B[i : i + 128 : 4] + T.broadcast(1.0, 32)
return buffer_ramp_access
def let_expression():
@T.prim_func
def func():
x = T.var("int32")
T.evaluate(T.let(x, 1, x + 1))
return func
def void_ptr():
@T.prim_func
def func(out_ret_value: T.Ptr[T.void]):
T.evaluate(out_ret_value)
return func
def decl_buffer():
@T.prim_func
def func(A: T.Buffer[(16, 16), "float32"], B: T.Buffer[(16, 16), "float32"]) -> None:
A_flattened = T.decl_buffer(data=A.data, shape=(256,), dtype="float32")
B_flattened = T.decl_buffer(data=B.data, shape=(256,), dtype="float32")
C_alias = T.decl_buffer(data=A_flattened.data, shape=(256,), dtype="float32")
for i in range(256):
B_flattened[i] = A_flattened[i] + C_alias[i] + T.float32(1.0)
return func
def allocate_and_decl_buffer():
@T.prim_func
def func(A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]) -> None:
D_data = T.allocate((16,), "float32", "global")
D = T.decl_buffer((16,), "float32", data=D_data)
for i in range(4):
with T.allocate((4,), "float32", "global") as C_data:
C = T.decl_buffer((4,), "float32", data=C_data)
for j in range(4):
C[j] = A[i * 4 + j] + T.float32(1.0)
for j in range(4):
D[j] = C[j]
for j in range(4):
B[i * 4 + j] = D[j]
return func
def float_infinity():
@T.prim_func
def func(
placeholder: T.Buffer[(1, 512, 768), "float32"], T_isinf: T.Buffer[(1, 512, 768), "bool"]
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2 in T.grid(1, 512, 768):
with T.block("T_isinf"):
ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
T.reads(placeholder[ax0, ax1, ax2])
T.writes(T_isinf[ax0, ax1, ax2])
T_isinf[ax0, ax1, ax2] = T.fabs(
placeholder[ax0, ax1, ax2], dtype="float32"
) == T.float32("inf") and not (T.isnan(placeholder[ax0, ax1, ax2], dtype="bool"))
return func
def minimal_i32_literal():
@T.prim_func
def func() -> None:
T.evaluate(T.int32(-2147483648))
T.evaluate(-T.int64(2147483648))
return func
def boolean_argument():
@T.prim_func
def func(a: T.boolean) -> None:
T.evaluate(a)
return func
def bool_argument():
@T.prim_func
def func(a: T.bool) -> None:
T.evaluate(a)
return func
def bool_variable_annotation():
@T.prim_func
def func() -> None:
a: T.bool = T.call_extern("dummy", dtype="bool")
T.evaluate(0)
return func
def return_none():
@T.prim_func
def func():
T.evaluate(0)
return func
def bool_primitive():
@T.prim_func
def func() -> None:
T.evaluate(T.bool(True))
return func
def bool_cast():
@T.prim_func
def func() -> None:
T.evaluate(T.bool(T.int32(0)))
return func
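# Parameterize the round-trip test over the generator functions defined in this file:
# test_roundtrip prints each generator's PrimFunc or IRModule with .script(show_meta=True),
# re-parses the script, and asserts structural equality with the original.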
ir_generator = tvm.testing.parameter(
opt_gemm_normalize,
opt_gemm_lower,
opt_gemm_mod_host,
opt_conv_tensorcore_normalize,
opt_conv_tensorcore_lower,
opt_conv_tensorcore_mod_host,
vthread_func,
matmul,
module_const,
constant,
rank0,
rank0_block,
select,
minmax,
abs,
constant_folding,
simplify_bracket,
while_loop,
primfunc_with_allocate_annotations,
comm_reducer_single_reduce_group,
comm_reducer_multiple_reduce_groups,
multiple_commreducer,
loop_extent_dependent,
nontrivial_range_axis,
func_with_target_spec_by_config,
func_with_target_spec_by_str,
func_root_attr,
func_trivial_root_block,
func_nested_root_block,
func_T_ptr_let_statement,
func_T_ptr_allocate,
llvm_intrin_call,
parse_bufferslice_as_range_bound,
int64_support,
string_annotation_escaping,
pointer_type,
buffer_axis_separator,
buffer_ramp_access_as_slice_index,
let_expression,
void_ptr,
decl_buffer,
allocate_and_decl_buffer,
float_infinity,
minimal_i32_literal,
boolean_argument,
bool_argument,
bool_variable_annotation,
bool_primitive,
bool_cast,
return_none,
)
def test_roundtrip(ir_generator):
original = ir_generator()
after_roundtrip = tvm.script.from_source(original.script(show_meta=True))
tvm.ir.assert_structural_equal(original, after_roundtrip, True)
def test_return_none_no_trailing_type():
func = return_none()
script = func.script()
assert "-> None" not in script
if __name__ == "__main__":
tvm.testing.main()
from tvm.script.parser_v1 import tir as T
@T.prim_func
def loops() -> None:
for i in T.parallel(0, 2):
for j in T.serial(0, 1):
for z in T.vectorized(3, 4):
T.evaluate(0)
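# Note: the span assertions below rely on hard-coded positions; start_line is the line on
# which the decorated `loops`/`statements` function is defined in the original source file,
# and the column values correspond to their indentation there.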
def test_loops():
start_line = 23
parsed = loops
assert parsed.span.line == start_line
assert parsed.body.span.line == start_line + 1
assert parsed.body.min.span.column == 25
assert parsed.body.extent.span.column == 28
assert parsed.body.extent.span.line == start_line + 1
assert parsed.body.body.span.line == start_line + 2
assert parsed.body.body.loop_var.span.line == start_line + 2
assert parsed.body.body.loop_var.span.column == 13
assert parsed.body.body.body.span.line == start_line + 3
assert parsed.body.body.body.span.column == 22
assert parsed.body.body.body.body.span.line == start_line + 4
assert parsed.body.body.body.body.span.column == 17
@T.prim_func
def statements() -> None:
T.evaluate(1)
T.evaluate("test")
def test_statements():
start_line = 53
parsed = statements
assert parsed.body.span.line == start_line + 1
assert parsed.body[0].span.line == start_line + 1
assert parsed.body[0].span.column == 5
assert parsed.body[0].span.line == start_line + 1
assert parsed.body[0].span.column == 5
if __name__ == "__main__":
test_loops()
test_statements()
import sys
import pytest
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.script import from_source
from tvm.script import tir as T
@T.prim_func
def transformed_matmul_no_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
with T.block("update"):
vi, vj = T.axis.remap("SS", [i0, i1])
vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
T.reads([C[vi, vj], A[vi, vk], B[vj, vk]])
T.writes([C[vi, vj], A[vi, vk]])
with T.init():
C[vi, vj] = 0.0
A[vi, vk] = A[vi, vk] + B[vj, vk]
C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
@T.prim_func
def transformed_matmul_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
with T.block("update"):
vi, vj = T.axis.remap("SS", [i0, i1])
vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
T.writes(C[vi, vj], A[vi, vk])
with T.init():
C[vi, vj] = 0.0
A[vi, vk] = A[vi, vk] + B[vj, vk]
C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
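# The two matmul variants above differ only in syntax: the sugared form passes the regions
# to T.reads/T.writes as plain arguments instead of a list, so both must parse to equal IR.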
def test_reads_writes_syntax_sugar():
assert_structural_equal(transformed_matmul_no_syntax_sugar, transformed_matmul_syntax_sugar)
@T.prim_func
def loop_no_syntax_sugar(a: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
for i in T.serial(0, 128):
for j in T.parallel(0, 128):
for k in T.vectorized(0, 128):
for x in T.unroll(0, 128):
for y in T.thread_binding(0, 128, thread="threadIdx.x"):
for z in T.thread_binding(0, 128, thread="threadIdx.x"):
A[i, j, k, x] = A[i, j, k, x] * 2.0
@T.prim_func
def loop_syntax_sugar(a: T.handle) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
for i in T.serial(128):
for j in T.parallel(128):
for k in T.vectorized(128):
for x in T.unroll(128):
for y in T.thread_binding(128, "threadIdx.x"):
for z in T.thread_binding(128, thread="threadIdx.x"):
A[i, j, k, x] = A[i, j, k, x] * 2.0
def test_loop_syntax_sugar():
assert_structural_equal(loop_no_syntax_sugar, loop_syntax_sugar)
@T.prim_func
def elementwise_handle(
a: T.handle,
b: T.handle,
) -> None:
A = T.match_buffer(a, (128, 128, 128, 128))
B = T.match_buffer(b, (128, 128, 128, 128))
for i, j, k, l in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_buffer_kwargs(
a: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"),
b: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"),
) -> None:
for i, j, k, l in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_buffer_no_kwargs(
a: T.Buffer[(128, 128, 128, 128), "float32"],
b: T.Buffer[(128, 128, 128, 128), "float32"],
) -> None:
for i, j, k, l in T.grid(128, 128, 128, 128):
with T.block("B"):
vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0
def test_match_buffer_syntax_sugar():
assert_structural_equal(elementwise_handle, elementwise_buffer_kwargs)
assert_structural_equal(elementwise_handle, elementwise_buffer_no_kwargs)
def test_match_buffer_1d():
@T.prim_func
def func_no_sugar(a: T.handle):
A = T.match_buffer(a, shape=(16,))
for i in T.serial(16):
A[i] = 0.0
@T.prim_func
def func_with_sugar(A: T.Buffer[16, "float32"]):
for i in T.serial(16):
A[i] = 0.0
assert_structural_equal(func_no_sugar, func_with_sugar)
def test_match_buffer_no_kwargs_failed():
with pytest.raises(ValueError) as e:
@T.prim_func
def elementwise_buffer_no_kwargs_failed(
a: T.Buffer[(128, 128, 128, 128)],
b: T.Buffer[(128, 128, 128, 128)],
) -> None:
pass
@T.prim_func
def gemm_dyn_shape(a: T.handle, b: T.handle, c: T.handle):
N = T.var("int32")
M = T.var("int32")
K = T.var("int32")
A = T.match_buffer(a, (N, K), "float32")
B = T.match_buffer(b, (K, M), "float32")
C = T.match_buffer(c, (N, M), "float32")
for i, j, k in T.grid(N, M, K):
with T.block("gemm"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
def test_dynamic_shape_gemm():
gemm_dyn_shape_roundtrip = from_source(gemm_dyn_shape.script())
assert_structural_equal(gemm_dyn_shape, gemm_dyn_shape_roundtrip)
@T.prim_func
def preflattened_buffer_map(A: T.handle, B: T.handle):
A_1 = T.match_buffer(A, [1])
T.preflattened_buffer(A_1, [1], align=1, offset_factor=2)
B_1 = T.match_buffer(B, [1])
T.preflattened_buffer(B_1, [1])
B_1[0] = A_1[0]
def test_preflattened_buffer_map():
A_var = [
k for k, _ in preflattened_buffer_map.preflattened_buffer_map.items() if k.name == "A"
][0]
assert preflattened_buffer_map.preflattened_buffer_map[A_var].data_alignment == 1
assert preflattened_buffer_map.preflattened_buffer_map[A_var].offset_factor == 2
@T.prim_func
def match_buffer_int64(a: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32")
B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def match_buffer_int64_after_roundtrip(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
C: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
def test_match_buffer_int64():
original = match_buffer_int64
after_roundtrip = match_buffer_int64_after_roundtrip
assert_structural_equal(original, after_roundtrip, True)
def test_match_buffer_region_has_implicit_shape_dtype():
@T.prim_func
def explicit_shape_dtype(A: T.Buffer[(16, 64), "int32"]):
with T.block():
B = T.match_buffer(A[8:16, 32:64], shape=(8, 32), dtype="int32")
T.evaluate(0)
@T.prim_func
def implicit_shape_dtype(A: T.Buffer[(16, 64), "int32"]):
with T.block():
B = T.match_buffer(A[8:16, 32:64])
T.evaluate(0)
assert_structural_equal(explicit_shape_dtype, implicit_shape_dtype)
def test_match_buffer_input_requires_shape_arg():
with pytest.raises(tvm.error.DiagnosticError):
@T.prim_func
def func(a: T.handle):
A = T.match_buffer(a, dtype="int32")
T.evaluate(0)
def test_letstmt_bufferload_without_type_annotation():
@T.prim_func
def func_without_type_annotation(A: T.Buffer[(1,), "int32"]):
x = A[0]
T.evaluate(x)
def test_letstmt_bind_with_constant():
@T.prim_func
def constant_binds():
x = 1
y = 42.0
T.evaluate(T.cast(x, "float32") + y)
@T.prim_func
def constant_binds_wrapped():
x = T.meta_var(T.int32(1))
y = T.meta_var(T.float32(42.0))
T.evaluate(T.cast(x, "float32") + y)
assert_structural_equal(constant_binds, constant_binds_wrapped)
def test_func_call():
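# The helper below maps a logical (i, j) index of a 16x16 tile to the (thread_id, local_id)
# pair of the 32x8 ldmatrix warp layout; wrapping the result in T.meta_var lets the parser
# inline the computed expressions directly into the prim func body.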
def shared_16x16_to_ldmatrix_32x8_layout(i, j):
thread_id = (i % 8) * 4 + (j % 8) // 2
return T.meta_var((thread_id, (j // 8) * 4 + (i // 8) * 2 + (j % 8) % 2))
@T.prim_func
def mma_sync_m16n16k16_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
with T.block("root"):
T.reads(C[0:32, 0:8], A[0:32, 0:8], B[0:32, 0:8])
T.writes(C[0:32, 0:8])
for i, j, k in T.grid(16, 16, 16):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i, j, k])
thread_id_C, local_id_C = shared_16x16_to_ldmatrix_32x8_layout(i, j)
thread_id_A, local_id_A = shared_16x16_to_ldmatrix_32x8_layout(i, k)
thread_id_B, local_id_B = shared_16x16_to_ldmatrix_32x8_layout(k, j)
T.reads(
C[thread_id_C, local_id_C],
A[thread_id_A, local_id_A],
B[thread_id_B, local_id_B],
)
T.writes(C[thread_id_C, local_id_C])
C[thread_id_C, local_id_C] += (
A[thread_id_A, local_id_A] * B[thread_id_B, local_id_B]
)
@T.prim_func
def mma_sync_m16n16k16_desc_manual(a: T.handle, b: T.handle, c: T.handle) -> None:
A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp")
with T.block("root"):
T.reads(C[0:32, 0:8], A[0:32, 0:8], B[0:32, 0:8])
T.writes(C[0:32, 0:8])
for i, j, k in T.grid(16, 16, 16):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i, j, k])
T.reads(
C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 8 % 2],
A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 8 % 2],
B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 8 % 2],
)
T.writes(C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 8 % 2])
C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 8 % 2] = (
C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 8 % 2]
+ A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 8 % 2]
* B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 8 % 2]
)
assert_structural_equal(mma_sync_m16n16k16_desc, mma_sync_m16n16k16_desc_manual)
def test_int64_loop():
@T.prim_func
def int64_grid(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i, j in T.grid(T.int64(128), T.int64(128)):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def int64_grid_expanded(
A: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
B: T.Buffer[(T.int64(128), T.int64(128)), "float32"],
) -> None:
for i in range(T.int64(0), T.int64(128)):
for j in range(T.int64(0), T.int64(128)):
with T.block("C"):
vi = T.axis.spatial(T.int64(128), i)
vj = T.axis.spatial(T.int64(128), j)
B[vi, vj] = A[vi, vj] + 1.0
assert_structural_equal(int64_grid, int64_grid_expanded)
if __name__ == "__main__":
tvm.testing.main() |
from tvm.script import tir as T
"""
This prim func includes the buffer constructs that need to be checked,
e.g. reads/writes, match_buffer/alloc_buffer, serial/block, etc.
"""
@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
with T.block("root"):
T.reads([])
T.writes([])
B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
for i0 in T.serial(0, 128):
for ax1 in T.serial(0, 128):
with T.block("B"):
vi = T.axis.S(128, i0)
vj = T.axis.S(128, ax1)
T.reads([A[vi, vj]])
T.writes([B[vi, vj]])
T.block_attr({"buffer_dim_align": [[0, 0, 128, 127]]})
B[vi, vj] = A[vi, vj] * T.float32(2)
for i1 in T.serial(0, 128):
with T.block("C"):
vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
T.reads([B[vi_1, vj_1]])
T.writes([C[vi_1, vj_1]])
C[vi_1, vj_1] = B[vi_1, vj_1] + T.float32(1)
"""
This prim func includes the thread constructs that need to be checked,
e.g. env_thread, launch_thread, thread_binding, etc.
"""
@T.prim_func
def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
j1_0 = T.env_thread("threadIdx.x")
j0_0 = T.env_thread("threadIdx.x")
i = T.env_thread("blockIdx.x")
A = T.match_buffer(a, [128, 128])
B = T.match_buffer(b, [128, 128])
C = T.match_buffer(c, [128, 128])
T.launch_thread(i, 128)
T.launch_thread(j0_0, 4)
T.launch_thread(j1_0, 4)
for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
for j0_1 in T.serial(0, 32):
with T.block(""):
B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
)
for j1_1 in T.serial(0, 32):
with T.block(""):
C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
)
"""
This test case is added to test T.grid
"""
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
for i, ko in T.grid(128, 4):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
with T.block("B"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([B[vi], A[vi, vk]])
T.writes([B[vi]])
with T.init():
B[vi] = T.float32(0)
B[vi] = B[vi] + A[vi, vk]
"""
This test case is added to test T.comm_reducer, T.reinterpret, T.tvm_thread_allreduce
"""
@T.prim_func
def lowered_loop_split(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128], dtype="float32")
B = T.match_buffer(b, [128], dtype="float32")
reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
for i in T.serial(0, 128):
for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
normal_reduce_temp0[0] = T.float32(0)
for ko in T.serial(0, 4):
with T.block("B_normal_reduction"):
vi = T.axis.S(128, i)
vk = T.axis.R(128, ko * 32 + ki)
T.reads([A[vi, vk], normal_reduce_temp0[0]])
T.writes([normal_reduce_temp0[0]])
normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
with T.block("B_cross_thread_reduction"):
T.reads([normal_reduce_temp0[0]])
T.writes([reduce_temp0[0]])
T.attr(
T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
"reduce_scope",
T.reinterpret(T.uint64(0), dtype="handle"),
)
T.evaluate(
T.tvm_thread_allreduce(
T.uint32(1),
normal_reduce_temp0[0],
True,
reduce_temp0.data,
ki,
dtype="handle",
)
)
with T.block("B_write_back"):
vi = T.axis.S(128, i)
T.reads([reduce_temp0[0]])
T.writes([B[vi]])
B[vi] = reduce_temp0[0]
"""
This test case is added to test T.Buffer with a slice as argument and T.exp
"""
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [128, 128, 128], dtype="float32")
B = T.match_buffer(b, [128, 128], dtype="float32")
for i, j in T.grid(128, 128):
for k in T.thread_binding(0, 128, thread="threadIdx.x"):
with T.block("B"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
T.reads([B[vi, vj], A[vi, vj, vk]])
T.writes(
[
B[
T.min(vj, vi) : T.min(vj, vi)
+ (T.max(vj, vi) + 1 - T.min(vj, vi)),
T.min(vi, vj) : T.min(vi, vj)
+ (T.max(vi, vj) + 1 - T.min(vi, vj)),
]
]
)
with T.init():
B[vj, vi] = T.exp(B[vj, vi], dtype="float32")
B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
if __name__ == "__main__":
pass |
"""Test type checker based on python's type annotations""" |
import sys
from typing import Dict, List, Tuple, Union, Callable
import pytest |
import _pytest
from tvm.tir.schedule._type_checker import type_checked
def int_func(x: int) -> int:
return 2 * x
def str_func(x: str) -> str:
return 2 * x
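# Minimal illustration of the decorator under test (the function name below is
# only for illustration and is not one of the parametrized cases): type_checked
# validates call arguments against their annotations at call time and raises
# TypeError on a mismatch, which is what test_matches_type/test_not_matches
# below exercise.
@type_checked
def _double_checked(x: int) -> int:
    return 2 * x
# _double_checked(3) returns 6; _double_checked("3") would raise TypeError.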
test_cases = [
{
"type_annotation": int,
"positive_cases": [5],
"negative_cases": ["5"],
},
{
"type_annotation": List[int],
"positive_cases": [
[5],
[],
(1, 2, 3),
],
"negative_cases": [
None,
5,
["5"],
],
},
{
"type_annotation": Dict[str, int],
"positive_cases": [
{"key1": 0, "key2": 1, "key3": -1},
],
"negative_cases": [None, [1], {1: "1"}],
},
{
"type_annotation": Tuple[int],
"positive_cases": [
(5,),
],
"negative_cases": [
None,
(1, 2, 3),
[1],
5,
["5"],
],
},
{
"type_annotation": Tuple[str, int],
"positive_cases": [
("x", 5),
],
"negative_cases": [
42,
("x", 5, 6),
("x", 5, "y"),
("x", 5.0),
(None, 5),
],
},
{
"type_annotation": Union[str, int],
"positive_cases": [
"x",
5,
],
"negative_cases": [
5.0,
("x", 5, 6),
None,
],
},
{
"type_annotation": Callable,
"positive_cases": [str_func, int_func],
"negative_cases": [
None,
"x",
42,
],
},
{
"type_annotation": Callable[[int], int],
"positive_cases": [int_func],
"negative_cases": [
None,
"x",
42,
pytest.param(
str_func,
marks=pytest.mark.xfail(
reason="Signature of Callable arguments not currently checked"
),
),
],
},
]
def make_parametrization(type_annotation, case):
if isinstance(case, _pytest.mark.structures.ParameterSet):
marks = case.marks
(case,) = case.values
else:
marks = []
try:
annotation_name = type_annotation.__name__
except AttributeError:
annotation_name = str(type_annotation).replace("typing.", "")
if hasattr(case, "__name__"):
case_name = case.__name__
else:
case_name = str(case)
name = f"{annotation_name}, {case_name}"
return pytest.param(type_annotation, case, marks=marks, id=name)
positive_cases = [
make_parametrization(config["type_annotation"], case)
for config in test_cases
for case in config["positive_cases"]
]
negative_cases = [
make_parametrization(config["type_annotation"], case)
for config in test_cases
for case in config["negative_cases"]
]
@pytest.mark.parametrize(
["type_annotation", "case"],
positive_cases,
)
def test_matches_type(type_annotation, case):
@type_checked
def func(_: type_annotation):
pass
func(case)
@pytest.mark.parametrize(
["type_annotation", "case"],
negative_cases,
)
def test_not_matches(type_annotation, case):
@type_checked
def func(_: type_annotation):
pass
with pytest.raises(TypeError):
func(case)
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv)) |
import sys |
import multiprocessing |
import os |
import getpass |
import inspect |
import argparse |
import json |
import shutil |
import grp |
import string |
import random |
import subprocess |
import platform |
import textwrap |
import typing
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Callable, Union
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
SCRIPT_DIR = REPO_ROOT / ".ci-py-scripts"
NPROC = multiprocessing.cpu_count()
class col:
BLUE = "\033[94m"
CYAN = "\033[96m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
RESET = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def print_color(color: str, msg: str, bold: bool, **kwargs: Any) -> None:
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
bold_code = col.BOLD if bold else ""
print(bold_code + color + msg + col.RESET, **kwargs)
else:
print(msg, **kwargs)
warnings: List[str] = []
def clean_exit(msg: str) -> None:
print_color(col.RED, msg, bold=True, file=sys.stderr)
for warning in warnings:
print_color(col.YELLOW, warning, bold=False, file=sys.stderr)
exit(1)
def cmd(commands: List[Any], **kwargs: Any):
commands = [str(s) for s in commands]
command_str = " ".join(commands)
print_color(col.BLUE, command_str, bold=True)
proc = subprocess.run(commands, **kwargs)
if proc.returncode != 0:
raise RuntimeError(f"Command failed: '{command_str}'")
return proc
def get_build_dir(name: str) -> str:
build_dir = REPO_ROOT / f"build-{name}"
return str(build_dir.relative_to(REPO_ROOT))
def check_docker():
executable = shutil.which("docker")
if executable is None:
clean_exit("'docker' executable not found, install it first (e.g. 'apt install docker.io')")
if sys.platform == "linux":
try:
group = grp.getgrnam("docker")
if getpass.getuser() not in group.gr_mem:
warnings.append(
f"Note: User '{getpass.getuser()}' is not in the 'docker' group, either:\n"
" * run with 'sudo'\n"
" * add user to 'docker': sudo usermod -aG docker $(whoami), then log out and back in",
)
except KeyError:
warnings.append("Note: 'docker' group does not exist")
def check_gpu():
if not (sys.platform == "linux" and shutil.which("lshw")):
return
try:
proc = cmd(
["lshw", "-json", "-C", "display"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
stdout = proc.stdout.strip().strip(",")
stdout = json.loads(stdout)
except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
return
if isinstance(stdout, dict):
stdout = [stdout]
if not isinstance(stdout, list):
return
vendors = [s.get("vendor", "").lower() for s in stdout]
if not any("nvidia" in vendor for vendor in vendors):
warnings.append(
"nvidia GPU not found in 'lshw', maybe use --cpu flag when running 'docs' command?"
)
def gen_name(s: str) -> str:
suffix = "".join([random.choice(string.ascii_lowercase) for i in range(5)])
return f"{s}-{suffix}"
def docker(
name: str,
image: str,
scripts: List[str],
env: Dict[str, str],
interactive: bool,
additional_flags: Optional[Dict[str, str]] = None,
):
"""
Invoke a set of bash scripts through docker/bash.sh
name: container name
image: docker image name
scripts: list of bash commands to run
env: environment to set
"""
check_docker()
sccache_images = {
"ci_gpu",
"ci_cpu",
"ci_cortexm",
"ci_arm",
"ci_hexagon",
"ci_riscv",
"ci_adreno",
}
if image in sccache_images and os.getenv("USE_SCCACHE", "1") == "1":
scripts = [
"sccache --start-server",
] + scripts
env["CC"] = "/opt/sccache/cc"
env["CXX"] = "/opt/sccache/c++"
env["SCCACHE_CACHE_SIZE"] = os.getenv("SCCACHE_CACHE_SIZE", "50G")
docker_bash = REPO_ROOT / "docker" / "bash.sh"
command = [docker_bash] |
if sys.stdout.isatty():
command.append("-t")
command.append("--name")
command.append(name)
if interactive:
command.append("-i")
scripts = ["interact() {", " bash", "}", "trap interact 0", ""] + scripts
for key, value in env.items():
command.append("--env")
command.append(f"{key}={value}")
if additional_flags is not None:
for key, value in additional_flags.items():
command.append(key)
command.append(value)
SCRIPT_DIR.mkdir(exist_ok=True)
script_file = SCRIPT_DIR / f"{name}.sh"
with open(script_file, "w") as f:
f.write("set -eux\n\n")
f.write("\n".join(scripts))
f.write("\n")
command += [image, "bash", str(script_file.relative_to(REPO_ROOT))]
try:
cmd(command)
except RuntimeError as e:
clean_exit(f"Error invoking Docker: {e}")
except KeyboardInterrupt:
cmd(["docker", "stop", "--time", "1", name])
finally:
if os.getenv("DEBUG", "0") != "1":
script_file.unlink()
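# Illustrative call only (the script below is a placeholder, not part of this
# file); docs(), lint(), and the generated commands below all invoke docker()
# in this shape:
#
#     docker(
#         name=gen_name("example"),
#         image="ci_cpu",
#         scripts=["echo hello from the container"],
#         env={"IS_LOCAL": "1"},
#         interactive=False,
#     )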
def docs(
tutorial_pattern: Optional[str] = None,
full: bool = False,
interactive: bool = False,
skip_build: bool = False,
docker_image: Optional[str] = None,
) -> None:
"""
Build the documentation from gallery/ and docs/. By default this builds only
the Python docs without any tutorials.
arguments:
full -- Build all language docs, not just Python (this will use the 'ci_gpu' Docker image)
tutorial-pattern -- Regex for which tutorials to execute when building docs (this will use the 'ci_gpu' Docker image)
skip_build -- skip build and setup scripts
interactive -- start a shell after running build / test scripts
docker-image -- manually specify the docker image to use
"""
build_dir = get_build_dir("gpu")
extra_setup = []
image = "ci_gpu" if docker_image is None else docker_image
if not full and tutorial_pattern is None:
image = "ci_cpu" if docker_image is None else |
docker_image
build_dir = get_build_dir("cpu")
config_script = " && ".join(
[
f"mkdir -p {build_dir}",
f"pushd {build_dir}",
"cp ../cmake/config.cmake .",
"echo set\(USE_MICRO ON\) >> config.cmake",
"popd",
]
)
requirements = [
"Sphinx==4.2.0",
"tlcpack-sphinx-addon==0.2.1",
"synr==0.5.0",
"image==1.5.33",
"git+https:
"sphinx-rtd-theme==1.0.0",
"matplotlib==3.3.4",
"commonmark==0.9.1",
"Pillow==8.3.2",
"autodocsumm==0.2.7",
"docutils==0.16",
]
extra_setup = [
"python3 -m pip install --user " + " ".join(requirements),
]
else:
check_gpu()
config_script = f"./tests/scripts/task_config_build_gpu.sh {build_dir}"
scripts = extra_setup + [
config_script,
f"./tests/scripts/task_build.py --build-dir {build_dir}",
]
if skip_build:
scripts = []
scripts.append("./tests/scripts/task_python_docs.sh")
if tutorial_pattern is None:
tutorial_pattern = os.getenv("TVM_TUTORIAL_EXEC_PATTERN", ".py" if full else "none")
env = {
"TVM_TUTORIAL_EXEC_PATTERN": tutorial_pattern,
"PYTHON_DOCS_ONLY": "0" if full else "1",
"IS_LOCAL": "1",
"TVM_LIBRARY_PATH": str(REPO_ROOT / build_dir),
}
docker(name=gen_name("docs"), image=image, scripts=scripts, env=env, interactive=interactive)
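# Example invocations (illustrative): 'docs' is registered as a subcommand by
# main() below, so its parameters map to CLI flags via add_subparser, e.g.
#     ci.py docs                             # Python-only docs, no tutorials (ci_cpu image)
#     ci.py docs --full                      # all language docs (ci_gpu image)
#     ci.py docs --tutorial-pattern=<regex>  # also execute matching tutorials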
def serve_docs(directory: str = "_docs") -> None:
"""
Serve the docs using Python's http server
arguments:
directory -- Directory to serve from
"""
directory_path = Path(directory)
if not directory_path.exists():
clean_exit("Docs have not been built, run 'ci.py docs' first")
cmd([sys.executable, "-m", "http.server"], cwd=directory_path)
def lint(interactive: bool = False, fix: bool = False, docker_image: Optional[str] = None) -> None:
"""
Run CI's Sanity Check step
arguments:
interactive -- start a shell after running build / test scripts
fix -- where possible (currently black and clang-format) edit files in place with formatting fixes
docker-image -- manually specify the docker image to use
"""
env = {}
if fix:
env["IS_LOCAL"] = "true"
env["INPLACE_FORMAT"] = "true"
docker(
name=gen_name(f"ci-lint"),
image="ci_lint" if docker_image is None else docker_image,
scripts=["./tests/scripts/task_lint.sh"],
env=env,
interactive=interactive,
)
Option = Tuple[str, List[str]]
def generate_command(
name: str,
options: Dict[str, Option],
help: str,
precheck: Optional[Callable[[], None]] = None,
post_build: Optional[List[str]] = None,
additional_flags: Optional[Dict[str, str]] = None,
):
"""
Helper to generate CLIs that:
1. Build with a config matching a specific CI Docker image (e.g. 'cpu')
2. Run tests (either a pre-defined set from scripts or manually via invoking
pytest)
3. (optional) Drop down into a terminal into the Docker container
"""
def fn(
tests: Optional[List[str]],
skip_build: bool = False,
interactive: bool = False,
docker_image: Optional[str] = None,
verbose: bool = False,
**kwargs,
) -> None:
"""
arguments:
tests -- pytest test IDs (e.g. tests/python or tests/python/a_file.py::a_test[param=1])
skip_build -- skip build and setup scripts
interactive -- start a shell after running build / test scripts
docker-image -- manually specify the docker image to use
verbose -- run verbose build
"""
if precheck is not None:
precheck()
build_dir = get_build_dir(name)
if skip_build:
scripts = []
else:
scripts = [ |
f"./tests/scripts/task_config_build_{name}.sh {build_dir}",
f"./tests/scripts/task_build.py --build-dir {build_dir}",
]
if post_build is not None:
scripts += post_build
if any(v for v in kwargs.values()) and tests is not None:
option_flags = ", ".join([f"--{k}" for k in options.keys()])
clean_exit(f"{option_flags} cannot be used with --tests")
if tests is not None:
scripts.append(f"python3 -m pytest {' '.join(tests)}")
for option_name, (_, extra_scripts) in options.items():
if kwargs.get(option_name, False):
scripts.extend(script.format(build_dir=build_dir) for script in extra_scripts)
docker(
name=gen_name(f"ci-{name}"),
image=f"ci_{name}" if docker_image is None else docker_image,
scripts=scripts,
env={
"TVM_LIBRARY_PATH": str(REPO_ROOT / get_build_dir(name)),
"VERBOSE": "true" if verbose else "false",
},
interactive=interactive,
additional_flags=additional_flags,
)
fn.__name__ = name
return fn, options, help
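# Each generate_command(...) call below returns a (fn, options, help) tuple; main()
# unpacks these from the 'generated' list and registers fn as a subcommand via
# add_subparser, so e.g. the 'cpu' entry becomes 'ci.py cpu [--cpp] [--unittest] ...'.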
def check_arm_qemu() -> None:
"""
Check if a machine is ready to run an ARM Docker image
"""
machine = platform.machine().lower()
if "arm" in machine or "aarch64" in machine:
return
binfmt = Path("/proc/sys/fs/binfmt_misc")
if not binfmt.exists() or len(list(binfmt.glob("qemu-*"))) == 0:
clean_exit(
textwrap.dedent(
"""
You must run a one-time setup to use ARM containers on x86 via QEMU:
sudo apt install -y qemu binfmt-support qemu-user-static
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
See https:
""".strip(
"\n"
)
)
)
def cli_name(s: str) -> str:
return s.replace("_", "-")
def typing_get_origin(annotation):
if sys.version_info >= (3, 8):
return typing.get_origin(annotation)
else:
return annotation.__origin__
def typing_get_args(annotation):
if sys.version_info >= (3, 8):
return typing.get_args(annotation)
else:
return annotation.__args__
def is_optional_type(annotation):
return (
hasattr(annotation, "__origin__")
and (typing_get_origin(annotation) == typing.Union)
and (type(None) in typing_get_args(annotation))
)
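# typing.get_origin/typing.get_args only exist on Python 3.8+, hence the fallbacks
# above. For example, Optional[str] is Union[str, None]: typing_get_origin() gives
# typing.Union, typing_get_args() contains type(None), so
# is_optional_type(Optional[str]) is True, while is_optional_type(str) is False
# (a plain class has no __origin__).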
def add_subparser(
func: Callable,
subparsers: Any,
options: Optional[Dict[str, Option]] = None,
help: Optional[str] = None,
) -> Any:
"""
Utility function to make it so subparser commands can be defined locally
as a function rather than directly via argparse and manually dispatched
out.
"""
split = [s.strip() for s in func.__doc__.split("arguments:\n")]
if len(split) == 1:
args_help = None
command_help = split[0]
else:
command_help, args_help = split
if help is not None:
command_help = help
arg_help_texts = {}
if args_help is not None:
for line in args_help.split("\n"):
line = line.strip()
name, help_text = [t.strip() for t in line.split(" -- ")]
arg_help_texts[cli_name(name)] = help_text
subparser = subparsers.add_parser(cli_name(func.__name__), help=command_help)
seen_prefixes = set()
signature = inspect.signature(func)
for name, value in signature.parameters.items():
if name == "kwargs":
continue
arg_cli_name = cli_name(name)
kwargs: Dict[str, Union[str, bool]] = {"help": arg_help_texts[arg_cli_name]}
is_optional = is_optional_type(value.annotation)
if is_optional:
arg_type = typing_get_args(value.annotation)[0]
else:
arg_type = value.annotation
has_default = False
if value.default is not value.empty:
kwargs["default"] = value.default
has_default = True
if arg_type is bool:
kwargs["action"] = "store_true"
else:
kwargs["required"] = not is_optional and not has_default
if str(arg_type).startswith("typing.List"):
kwargs["action"] = "append"
if arg_cli_name[0] not in seen_prefixes:
subparser.add_argument(f"-{arg_cli_name[0]}", f"--{arg_cli_name}", **kwargs)
seen_prefixes.add(arg_cli_name[0])
else:
subparser.add_argument(f"--{arg_cli_name}", **kwargs)
if options is not None:
for option_name, (help, _) in options.items():
option_cli_name = cli_name(option_name)
if option_cli_name[0] not in seen_prefixes:
subparser.add_argument(
f"-{option_cli_name[0]}", f"--{option_cli_name}", action="store_true", help=help
)
seen_prefixes.add(option_cli_name[0])
else:
subparser.add_argument(f"--{option_cli_name}", action="store_true", help=help)
return subparser
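# Illustration of the mapping performed above (hypothetical function, not a real
# subcommand): given
#
#     def example(name: str, dry_run: bool = False) -> None:
#         """
#         Do something with a name
#         arguments:
#         name -- the name to operate on
#         dry-run -- print actions instead of executing them
#         """
#
# add_subparser() would create an 'example' subcommand with a required
# '-n/--name' option (str annotation, no default) and a '-d/--dry-run'
# store_true flag (bool annotation), pulling the help text from the lines
# after 'arguments:'.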
CPP_UNITTEST = ("run C++ unit tests", ["./tests/scripts/task_cpp_unittest.sh {build_dir}"])
generated = [
generate_command(
name="gpu",
help="Run GPU build and test(s)",
options={
"cpp": CPP_UNITTEST,
"topi": ("run topi tests", ["./tests/scripts/task_python_topi.sh"]),
"unittest": (
"run unit tests",
[
"./tests/scripts/task_java_unittest.sh",
"./tests/scripts/task_python_unittest_gpuonly.sh",
"./tests/scripts/task_python_integration_gpuonly.sh",
],
),
"frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend.sh"]),
},
),
generate_command(
name="cpu",
help="Run CPU build and test(s)",
options={
"cpp": CPP_UNITTEST, |
"integration": (
"run integration tests",
["./tests/scripts/task_python_integration.sh"],
),
"unittest": (
"run unit tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_vta_fsim.sh",
"./tests/scripts/task_python_vta_tsim.sh",
],
),
"frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend_cpu.sh"]),
},
),
generate_command(
name="minimal",
help="Run minimal CPU build and test(s)",
options={
"cpp": CPP_UNITTEST,
"unittest": (
"run unit tests",
[
"./tests/scripts/task_python_unittest.sh",
],
),
},
),
generate_command(
name="i386",
help="Run i386 build and test(s)",
options={
"cpp": CPP_UNITTEST,
"integration": (
"run integration tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_integration_i386only.sh",
],
),
},
),
generate_command(
name="wasm",
help="Run WASM build and test(s)",
options={
"cpp": CPP_UNITTEST,
"test": ("run WASM tests", ["./tests/scripts/task_web_wasm.sh"]),
},
),
generate_command(
name="cortexm",
help="Run Cortex-M build and test(s)",
options={
"cpp": CPP_UNITTEST,
"test": (
"run microTVM tests",
[
"./tests/scripts/task_python_microtvm.sh",
"./tests/scripts/task_demo_microtvm.sh",
],
),
},
),
generate_command(
name="hexagon",
help="Run Hexagon build and test(s)", |
post_build=["./tests/scripts/task_build_hexagon_api.sh --output build-hexagon"],
options={
"cpp": CPP_UNITTEST,
"test": (
"run Hexagon API/Python tests",
[
"./tests/scripts/task_python_hexagon.sh",
],
),
},
),
generate_command(
name="arm",
help="Run ARM build and test(s) (native or via QEMU on x86)",
precheck=check_arm_qemu,
options={
"cpp": CPP_UNITTEST,
"python": (
"run full Python tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_arm_compute_library.sh",
],
),
},
),
generate_command(
name="riscv",
help="Run RISC-V build and test(s)",
options={
"cpp": CPP_UNITTEST,
"python": (
"run full Python tests",
[
"./tests/scripts/task_riscv_microtvm.sh",
],
),
},
),
generate_command(
name="adreno",
help="Run Adreno build and test(s)",
post_build=["./tests/scripts/task_build_adreno_bins.sh"],
additional_flags={
"--volume": os.environ.get("ADRENO_OPENCL", "") + ":/adreno-opencl",
"--env": "ADRENO_OPENCL=/adreno-opencl",
"--net": "host",
},
options={
"test": (
"run Adreno API/Python tests",
[
"./tests/scripts/task_python_adreno.sh " + os.environ.get("ANDROID_SERIAL", ""),
],
),
},
),
]
def main():
description = """
Run CI jobs locally via Docker. This facilitates reproducing CI failures for
fast iteration. Note that many of the Docker images required are large (the
CPU and GPU images are both over 25GB) and may take some time to download on first use. |
"""
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="command")
commands = {}
for func in [docs, serve_docs, lint]:
add_subparser(func, subparsers)
commands[cli_name(func.__name__)] = func
for func, options, help in generated:
add_subparser(func, subparsers, options, help)
commands[cli_name(func.__name__)] = func
args = parser.parse_args()
if args.command is None:
parser.print_help()
exit(1)
func = commands[args.command]
kwargs = {k: getattr(args, k) for k in dir(args) if not k.startswith("_") and k != "command"}
func(**kwargs)
if __name__ == "__main__":
main() |
import argparse |
import os |
import pickle
from pathlib import Path
import csv |
import sys
from typing import Callable, Dict, List, Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts"))
from git_utils import git, GitHubRepo
from github_tag_teams |