def pynq_bitstream_program(bitstream_path):
    from pynq import Bitstream

    bitstream = Bitstream(bitstream_path)
    bitstream.download()
def de10nano_bitstream_program(bitstream_path):
    from tvm import get_global_func

    program = get_global_func("vta.de10nano.program")
    program(bitstream_path)
def intelfocl_bitstream_program(bitstream_path, mem_size=4 * 1024 * 1024 * 1024):
    from tvm import get_global_func

    program = get_global_func("vta.oclfpga.program")
    program(bitstream_path, mem_size)
def bitstream_program(target, bitstream, *args):
    """Program the bitstream to the target device."""
    if target in ["pynq", "ultra96"]:
        pynq_bitstream_program(bitstream)
    elif target in ["de10nano"]:
        de10nano_bitstream_program(bitstream)
    elif target in ["sim", "tsim"]:
        return
    elif target in ["intelfocl"]:
        intelfocl_bitstream_program(bitstream, *args)
    else:
        raise RuntimeError("Unknown target {}".format(target))


if __name__ == "__main__":
    main()
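
# Usage sketch (hypothetical paths; not part of the original module):
# bitstream_program dispatches purely on the target string, so a caller
# only needs a target name and a bitstream file.
#
#   from vta.program_bitstream import bitstream_program
#
#   bitstream_program("pynq", "/path/to/vta.bit")            # program a PYNQ board
#   bitstream_program("sim", "")                             # no-op for simulators
#   bitstream_program("intelfocl", "vta.aocx", 4 * 1024**3)  # extra arg: memory size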
"""VTA RPC client function""" |
import os
from tvm |
import rpc
from vta |
import program_bitstream
from .environment |
import get_env
from .bitstream |
import download_bitstream, get_bitstream_path
def reconfig_runtime(remote):
    """Reconfigure remote runtime based on current hardware spec.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session
    """
    env = get_env()
    freconfig = remote.get_function("tvm.contrib.vta.reconfig_runtime")
    freconfig(env.pkg.cfg_json)
def program_fpga(remote, bitstream=None):
    """Upload and program a bitstream.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session
    bitstream : str, optional
        Path to a local bitstream file. If unset, tries to download from the cache server.
    """
    env = get_env()

    if bitstream:
        assert os.path.isfile(bitstream)
    else:
        bitstream = get_bitstream_path()
        if not os.path.isfile(bitstream):
            if env.TARGET == "de10nano":
                return
            download_bitstream()

    if isinstance(remote, rpc.LocalSession):
        program_bitstream.bitstream_program(env.TARGET, bitstream)
    else:
        fprogram = remote.get_function("tvm.contrib.vta.init")
        remote.upload(bitstream)
        fprogram(os.path.basename(bitstream))
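
# Usage sketch of the intended call sequence (assumes a reachable VTA RPC
# server; the address below is a placeholder): connect, program the FPGA,
# then reconfigure the runtime to match the current hardware spec.
#
#   from tvm import rpc
#   from vta.rpc_client import program_fpga, reconfig_runtime
#
#   remote = rpc.connect("192.168.2.99", 9091)  # placeholder host/port
#   program_fpga(remote, bitstream=None)  # None -> resolve via the bitstream cache
#   reconfig_runtime(remote)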
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities, this namespace is not imported by default."""
from .utils import run
"""Utilities to start simulator."""
import ctypes
import json
import warnings

import tvm

from ..environment import get_env
from ..libinfo import find_libvta
def _load_sw():
    """Load hardware library for simulator."""
    env = get_env()
    lib_driver_name = (
        "libvta_tsim"
        if env.TARGET == "tsim"
        else "libvta"
        if env.TARGET == "intelfocl"
        else "libvta_fsim"
    )
    require_sim = env.TARGET in ("sim", "tsim")
    libs = []

    # Load driver library
    lib_driver = find_libvta(lib_driver_name, optional=(not require_sim))
    if not lib_driver:
        return []

    try:
        libs = [ctypes.CDLL(lib_driver[0], ctypes.RTLD_GLOBAL)]
    except OSError as err:
        if require_sim:
            raise err
        warnings.warn("Error when loading VTA driver {}: {}".format(lib_driver[0], err))
        return []

    if env.TARGET == "tsim":
        # tsim additionally needs the hardware module built from Chisel
        lib_hw = find_libvta("libvta_hw", optional=True)
        assert lib_hw
        f = tvm.get_global_func("vta.tsim.init")
        m = tvm.runtime.load_module(lib_hw[0], "vta-tsim")
        f(m)
        return lib_hw

    return libs
def enabled():
    """Check if simulator is enabled."""
    f = tvm.get_global_func("vta.simulator.profiler_clear", True)
    return f is not None


def clear_stats():
    """Clear profiler statistics."""
    env = get_env()
    if env.TARGET == "sim":
        f = tvm.get_global_func("vta.simulator.profiler_clear", True)
    else:
        f = tvm.get_global_func("vta.tsim.profiler_clear", True)
    if f:
        f()
def stats():
    """Get profiler statistics.

    Returns
    -------
    stats : dict
        Current profiler statistics
    """
    env = get_env()
    if env.TARGET == "sim":
        x = tvm.get_global_func("vta.simulator.profiler_status")()
    else:
        x = tvm.get_global_func("vta.tsim.profiler_status")()
    return json.loads(x)


# debug flag to skip execution
DEBUG_SKIP_EXEC = 1


def debug_mode(flag):
    """Set debug mode.

    Parameters
    ----------
    flag : int
        The debug flag, 0 means clear all flags.
    """
    tvm.get_global_func("vta.simulator.profiler_debug_mode")(flag)


LIBS = _load_sw()
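
# Sketch of the profiler workflow these hooks enable (assumes TARGET is
# "sim" and a VTA-enabled TVM build; `f` stands for some compiled function):
#
#   from vta.testing import simulator
#
#   if simulator.enabled():
#       simulator.clear_stats()
#       f(*args)                  # run a VTA-compiled workload
#       print(simulator.stats())  # JSON-decoded counters, e.g. load/store bytes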
"""Test Utilities"""
from __future__ |
import absolute_ |
import as _abs |
import os
from tvm |
import rpc, autotvm
from ..environment |
import get_env
from . |
import simulator
def run(run_func):
    """Run test function on all available env.

    Parameters
    ----------
    run_func : function(env, remote)
    """
    env = get_env()

    if env.TARGET in ["sim", "tsim", "intelfocl"]:
        # Talk to a local RPC server if VTA_LOCAL_SIM_RPC is set,
        # otherwise run against a local session.
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            remote = rpc.connect("127.0.0.1", local_rpc)
            run_func(env, remote)
        else:
            # Make sure the simulation library exists before running.
            if env.TARGET == "sim":
                assert simulator.enabled()
            run_func(env, rpc.LocalSession())
    elif env.TARGET in ["pynq", "ultra96", "de10nano"]:
        # Request a device from the tracker if tracker variables are set,
        # otherwise fall back to a direct RPC connection.
        tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
        tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
        pynq_host = os.environ.get("VTA_RPC_HOST", None)
        pynq_port = os.environ.get("VTA_RPC_PORT", None)
        if tracker_host and tracker_port:
            remote = autotvm.measure.request_remote(
                env.TARGET, tracker_host, int(tracker_port), timeout=10000
            )
            run_func(env, remote)
        else:
            if pynq_host and pynq_port:
                remote = rpc.connect(pynq_host, int(pynq_port))
                run_func(env, remote)
            else:
                raise RuntimeError(
                    "Please set the VTA_RPC_HOST and VTA_RPC_PORT environment variables"
                )
    else:
        raise RuntimeError("Unknown target %s" % env.TARGET)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM TOPI connector, eventually most of these should go to TVM repo"""
from . import bitpack
from .graphpack import graph_pack
from . import op
from .vta_conv2d import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense import dense_packed, schedule_dense_packed
from . import utils
"""Bit packing operators"""
from __future__ import absolute_import as _abs

import tvm
from tvm import te
from tvm.topi import utils

from tvm.relay.op.op import register_compute, register_injective_schedule
from tvm.relay.op.op import register_pattern, OpPattern
def bitpack(data, bits, pack_type="int8", name="bitpack"):
"""Packs lowest dimension into format needed by VTA
Parameters
----------
pack_axis : int
index of the axis to pack in data
bit_axis : int
index of axis to place bit axis in resulting packed data
Returns
-------
packed : Tensor
The packed tensor.
"""
shape_vec = list(data.shape)
if pack_type == "int8":
data_width = 8
elif pack_type == "int16":
data_width = 16
elif pack_type == "int32":
data_width = 32
else:
raise RuntimeError("Unknown pack type %s" % pack_type)
assert data_width % bits == 0
lanes = data_width
assert utils.get_const_int(shape_vec[-1]) % lanes == 0, "Not a multiple of word size"
shape_vec[-1] = shape_vec[-1]
oshape = tuple(shape_vec)
def _bitpack(*indices):
ret = None
mask = tvm.tir.const((1 << bits) - 1, pack_type)
for k in range(lanes):
idx = list(indices)
idx[-1] = idx[-1] * lanes + k
elem = data(*idx).astype(pack_type)
if k == 0:
ret = elem & mask
else:
val = (elem & mask) << tvm.tir.const(k * bits, pack_type)
ret = ret | val
return ret
return te.compute(oshape, _bitpack, name=name, tag="bitpack")
@register_compute("bitpack", level=15)
def compute_bitpack(attrs, inputs):
lanes = attrs.lanes
dtype = inputs[0].dtype
assert dtype == "int8"
width = 8
assert width % lanes == 0
bits = 8
return bitpack(inputs[0], bits, dtype)
register_injective_schedule("bitpack")
register_pattern("bitpack", OpPattern.INJECTIVE) |
"""A Relay implementation of graph packing.""" |
import tvm
from tvm |
import relay
from tvm.relay |
import op, transform
from tvm.relay |
import ExprMutator
def run_opt_pass(expr, opt_pass):
    """Execute a relay pass."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(mod)
    entry = mod["main"]
    return entry if isinstance(expr, relay.Function) else entry.body


def _to_shape(shape):
    """Convert shape into tuple."""
    return tuple(int(sh) for sh in shape)
def _pack_batch_channel(data, dshape, bfactor, cfactor):
    """Pack the data channel dimension."""
    assert int(dshape[0]) % bfactor == 0
    assert int(dshape[1]) % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            int(dshape[0]) // bfactor,
            bfactor,
            int(dshape[1]) // cfactor,
            cfactor,
            int(dshape[2]),
            int(dshape[3]),
        ),
    )
    data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
    return data
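
# Shape walk-through for _pack_batch_channel with bfactor=1, cfactor=16:
# (1, 64, 56, 56) --reshape--> (1, 1, 4, 16, 56, 56)
#                 --transpose(0, 2, 4, 5, 1, 3)--> (1, 4, 56, 56, 1, 16),
# i.e. the NCHW1n16c layout the VTA schedules expect.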
def _unpack_batch_channel(data, old_shape, unpack_transpose=False):
    """Unpack the data channel dimension."""
    if unpack_transpose:
        data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
    data = op.reshape(data, newshape=old_shape)
    return data


def _channel_const_match(channel_length, cfactor_out):
    """Round up the channel constant if the value is not divisible by cfactor_out."""
    diff = int(channel_length) % cfactor_out
    if diff != 0:
        diff = cfactor_out - diff
        channel_length = channel_length + diff
    return diff, channel_length
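
# For example, _channel_const_match(30, 16) returns (2, 32): 30 % 16 == 14,
# so diff = 16 - 14 = 2 rounds the channel length up to the next multiple.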
def _const_shape_match(data, dshape, cfactor_out):
    """Pad the constant if shape[0] is not divisible by cfactor_out."""
    assert len(dshape) == 3
    pad_width = int(dshape[0]) % cfactor_out
    if pad_width != 0:
        pad_width = cfactor_out - pad_width
        data = op.nn.pad(data, [[0, pad_width], [0, 0], [0, 0]])
        dshape = tuple([dshape[0] + pad_width, dshape[1], dshape[2]])
    return data, dshape
def _weight_shape_match(data, dshape, channels, cfactor_out, transpose=False):
    """Pad the weight if shape[0] is not divisible by cfactor_out."""
    assert len(dshape) == 4
    pad_width = int(dshape[0]) % cfactor_out
    channels_pad = int(channels) % cfactor_out
    if pad_width != 0:
        pad_width = cfactor_out - pad_width
        data = op.nn.pad(data, [[0, pad_width], [0, 0], [0, 0], [0, 0]])
        dshape = tuple([dshape[0] + pad_width, dshape[1], dshape[2], dshape[3]])

    if channels_pad != 0:
        channels = channels + (cfactor_out - channels_pad)

    return data, dshape, channels
def _weight_shape_match_transpose(data, dshape, channels, cfactor_out):
    """Pad the weight if shape[1] is not divisible by cfactor_out."""
    assert len(dshape) == 4
    pad_width = int(dshape[1]) % cfactor_out
    channels_pad = int(channels) % cfactor_out
    if pad_width != 0:
        pad_width = cfactor_out - pad_width
        data = op.nn.pad(data, [[0, 0], [0, pad_width], [0, 0], [0, 0]])
        # build the padded shape as a proper tuple
        dshape = (dshape[0], dshape[1] + pad_width, dshape[2], dshape[3])

    if channels_pad != 0:
        channels = channels + (cfactor_out - channels_pad)

    return data, dshape, channels
def _pack_weight(data, dshape, cfactor):
    """Pack the weight into packed format."""
    assert len(dshape) == 4
    assert int(dshape[0]) % cfactor == 0
    assert int(dshape[1]) % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            int(dshape[0]) // cfactor,
            cfactor,
            int(dshape[1]) // cfactor,
            cfactor,
            int(dshape[2]),
            int(dshape[3]),
        ),
    )
    data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
    return data
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
    """Pack the weight into packed format."""
    dshape = _to_shape(dshape)
    assert len(dshape) == 4
    assert dshape[0] % cfactor == 0
    assert dshape[1] % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            dshape[0] // cfactor,
            cfactor,
            dshape[1] // cfactor,
            cfactor,
            dshape[2],
            dshape[3],
        ),
    )
    data = op.transpose(data, axes=(2, 0, 4, 5, 3, 1))
    return data
def _pack_const(data, dshape, dtype, bfactor, cfactor):
    """Pack a constant parameter."""
    dshape = _to_shape(dshape)
    assert len(dshape) == 3
    assert dshape[0] % cfactor == 0
    data = op.reshape(data, newshape=(dshape[0] // cfactor, cfactor, dshape[1], dshape[2], 1))
    data = op.transpose(data, axes=(0, 2, 3, 4, 1))

    # broadcast batch dimension to bfactor
    data = op.broadcast_to(
        data, shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor)
    )
    return data
def _get_tensor_shape(node):
    """Get node shape."""
    if isinstance(node.checked_type, relay.ty.TensorType):
        return _to_shape(node.checked_type.shape)
    return []


def _get_tensor_type(node):
    """Get node type."""
    if isinstance(node.checked_type, relay.ty.TensorType):
        return node.checked_type.dtype
    return "float32"


def _operator_idx_inc(expr, count_meta, operator_current_idx):
    """Increase operator index."""
    if isinstance(expr, relay.expr.Constant):
        operator_current_idx = operator_current_idx + 1 if count_meta else operator_current_idx
    else:
        operator_current_idx = operator_current_idx + 1
    return operator_current_idx
class ExprDeviceAnnot(ExprMutator):
    """Visitor to perform graph annotation on an AST.

    Parameters
    ----------
    start: int
        the start location to mark run on vta (inclusive)
    end: int
        the end location to mark run on vta (exclusive)

    Returns
    -------
    None
    """

    def __init__(self, start=-1, end=-1):
        self.ext_dev = tvm.device("ext_dev")
        self.cpu_dev = tvm.device("cpu")
        self.cast = op.op.get("cast")
        self.counter = -1
        self.start = start
        self.end = end
        super().__init__()

    def visit_call(self, call):
        """Visit the children."""
        # First visit the children.
        args = [self.visit(arg) for arg in call.args]

        self.counter += 1
        if self.counter == self.start:
            ret = relay.Call(call.op, args, call.attrs)
            ret = relay.annotation.on_device(ret, self.ext_dev)
            return ret

        if self.counter == self.end:
            ret = relay.Call(call.op, args, call.attrs)
            ret = relay.annotation.on_device(ret, self.cpu_dev)
            return ret

        if self.counter > self.start and self.counter < self.end:
            ret = relay.Call(call.op, args, call.attrs)
            # skip the float ops, e.g. the float->int cast chain
            if self.is_float_op(call):
                return ret
            return relay.annotation.on_device(ret, self.ext_dev)

        return relay.Call(self.visit(call.op), args, call.attrs)

    def is_float_op(self, call):
        """Check if this op belongs to a float op.

        In general, a float op's output dtype is float; a special case is
        the float->int cast, which follows this op sequence:
        multiply(float) -> round(float) -> clip(float) -> cast(int)
        """
        args = call.args
        odtype = _get_tensor_type(call)
        if odtype == "float32":
            return True

        if call.op == self.cast:
            idtype = _get_tensor_type(args[0])
            if idtype == "float32":
                return True

        return False
class ExprLocator(ExprMutator):
    """Visitor to locate op on an AST."""

    def __init__(self):
        self.counter = -1
        self.op2nodes = {}
        super().__init__()

    def visit_call(self, call):
        """Visit the children."""
        args = [self.visit(arg) for arg in call.args]
        odtype = _get_tensor_type(call)
        self.counter += 1
        if (call.op, odtype) in self.op2nodes:
            self.op2nodes[(call.op, odtype)].append(self.counter)
        else:
            self.op2nodes[(call.op, odtype)] = [self.counter]
        return relay.Call(self.visit(call.op), args, call.attrs)
class ExprPack(ExprMutator):
    """Visitor to perform graph packing on an AST."""

    def __init__(self, bfactor, cfactor, weight_bits):
        self.bfactor = bfactor
        self.cfactor = cfactor
        self.weight_bits = weight_bits
        self.start_pack = False
        # Cache the operators the visitor matches against.
        self.bitpack_start = op.op.get("annotation.bitpack_start")
        self.bitpack_end = op.op.get("annotation.bitpack_end")
        self.conv2d = op.op.get("nn.conv2d")
        self.conv2d_transpose = op.op.get("nn.conv2d_transpose")
        self.add = op.op.get("add")
        self.multiply = op.op.get("multiply")
        self.bias_add = op.op.get("nn.bias_add")
        self.pad = op.op.get("nn.pad")
        self.upsampling = op.op.get("nn.upsampling")
        self.reshape = op.op.get("reshape")
        self.number_of_conv2d = 0
        self.unpack_transpose = True
        super().__init__()
    def visit_call(self, call):
        """Visit the children."""
        # First visit the children.
        oshape = _get_tensor_shape(call)
        odtype = _get_tensor_type(call)
        input_types = [arg.checked_type for arg in call.args]
        args = [self.visit(arg) for arg in call.args]

        # Start and stop cases.
        if call.op == self.bitpack_start:
            assert not self.start_pack
            self.start_pack = True
            return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
        if call.op == self.bitpack_end:
            if self.start_pack:
                self.start_pack = False
                data = args[0]
                data_shape = _get_tensor_shape(call.args[0])
                return _unpack_batch_channel(data, data_shape, self.unpack_transpose)
        if self.start_pack:
            # Operator cases
            if call.op == self.conv2d and odtype == "int32":
                self.number_of_conv2d += 1
                assert 8 % self.weight_bits == 0
                w_lanes = 8 // self.weight_bits
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                kernel_layout = "OIHW%do%di" % (self.cfactor, self.cfactor)
                data, weight = args
                data_shape = _to_shape(input_types[0].shape)
                kernel_shape = _to_shape(input_types[1].shape)
                channels = call.attrs.channels
                weight, kernel_shape, channels = _weight_shape_match(
                    weight, kernel_shape, channels, self.cfactor
                )
                kernel = _pack_weight(weight, kernel_shape, self.cfactor)
                # insert bit packing when necessary
                if w_lanes != 1:
                    assert 8 % w_lanes == 0
                    kernel = op.bitpack(kernel, lanes=w_lanes)

                conv2d = op.nn.conv2d(
                    data,
                    kernel,
                    strides=call.attrs.strides,
                    padding=call.attrs.padding,
                    dilation=call.attrs.dilation,
                    groups=call.attrs.groups,
                    channels=channels,
                    kernel_size=call.attrs.kernel_size,
                    data_layout=data_layout,
                    kernel_layout=kernel_layout,
                    out_dtype=call.attrs.out_dtype,
                )
                return conv2d
            if call.op == self.conv2d_transpose and odtype == "int32":
                self.number_of_conv2d += 1
                assert 8 % self.weight_bits == 0
                w_lanes = 8 // self.weight_bits
                if self.start_pack:
                    data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                    kernel_layout = "IOHW%di%do" % (self.cfactor, self.cfactor)
                    data, weight = args
                    data_shape = _to_shape(input_types[0].shape)
                    kernel_shape = _to_shape(input_types[1].shape)
                    channels = call.attrs.channels
                    weight, kernel_shape, channels = _weight_shape_match_transpose(
                        weight, kernel_shape, channels, self.cfactor
                    )
                    kernel = _pack_weight_conv2d_transpose(weight, kernel_shape, self.cfactor)
                    conv2d = op.nn.conv2d_transpose(
                        data,
                        kernel,
                        strides=call.attrs.strides,
                        padding=call.attrs.padding,
                        dilation=call.attrs.dilation,
                        groups=call.attrs.groups,
                        channels=call.attrs.channels,
                        kernel_size=call.attrs.kernel_size,
                        data_layout=data_layout,
                        kernel_layout=kernel_layout,
                        output_padding=call.attrs.output_padding,
                        out_dtype=call.attrs.out_dtype,
                    )
                return conv2d
            if call.op == self.add and tuple(input_types[0].shape) == tuple(input_types[1].shape):
                pass
            elif call.op == self.add and len(input_types[1].shape) == 3:
                data, const = args
                const, input_shape = _const_shape_match(const, input_types[1].shape, self.cfactor)
                const = _pack_const(
                    const, _to_shape(input_shape), input_types[1].dtype, self.bfactor, self.cfactor
                )
                return relay.Call(self.add, [data, const])
            elif call.op == self.multiply and tuple(input_types[0].shape) == tuple(
                input_types[1].shape
            ):
                pass
            elif call.op == self.multiply and len(input_types[1].shape) == 3:
                data, const = args
                const = _pack_const(
                    const,
                    _to_shape(input_types[1].shape),
                    input_types[1].dtype,
                    self.bfactor,
                    self.cfactor,
                )
                return relay.Call(self.multiply, [data, const])
            elif self.start_pack and call.op == self.bias_add:
                data, bias = args
                bias = _pack_const(
                    bias,
                    _to_shape(input_types[1].shape),
                    input_types[1].dtype,
                    self.bfactor,
                    self.cfactor,
                )
                return relay.Call(self.add, [data, bias])
            elif (
                self.start_pack and call.op == op.op.get("cast") and input_types[0].dtype == "int32"
            ):
                cast = relay.Call(op.op.get("cast"), [args[0]], call.attrs)
                return cast
            elif call.op == self.pad:
                pad_width = call.attrs.pad_width
                if len(pad_width) == 6:
                    pass
                elif len(pad_width) == 4:
                    (data, pad_value) = args
                    new_pad_width = []
                    new_pad_width.extend(pad_width)
                    for _ in range(2):
                        new_pad_width.append([0, 0])
                    return op.nn.pad(data, pad_value=pad_value, pad_width=new_pad_width)
            elif call.op == self.upsampling:
                (data,) = args
                scale_h = call.attrs.scale_h
                scale_w = call.attrs.scale_w
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                method = call.attrs.method
                align_corners = call.attrs.align_corners
                return op.nn.upsampling(data, scale_h, scale_w, data_layout, method, align_corners)
            elif call.op == self.reshape and len(input_types[0].shape) == 4:
                (data,) = args
                self.unpack_transpose = False
                data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
                new_shape = [int(x) for x in input_types[0].shape]
                # pad the channel dimension up to a multiple of cfactor
                pad, new_shape[1] = _channel_const_match(new_shape[1], self.cfactor)
                data = op.reshape(data, new_shape)
                # remove the pad data
                if pad != 0:
                    new_pad_width = [[0, 0], [0, -pad], [0, 0], [0, 0]]
                    data = op.nn.pad(data, pad_width=new_pad_width)
                return data

        return relay.Call(self.visit(call.op), args, call.attrs)
class BT(Exception):
    pass


def get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta):
    """We assume stop_name only appears once for simplicity.

    This constraint will be lifted in the future.
    bitpack_start and bitpack_end are both inclusive.
    """
    bitpack_start = op.op.get("annotation.bitpack_start")
    bitpack_end = op.op.get("annotation.bitpack_end")
    anf = run_opt_pass(expr, transform.ToANormalForm())
    operator_current_idx = 0
    def _recursion(anf, start_found, stop_found, operator_current_idx):
        """Helper to obtain the subgraph."""
        if isinstance(anf, relay.Function):
            return relay.Function(
                anf.params,
                _recursion(anf.body, start_found, stop_found, operator_current_idx),
                anf.ret_type,
                anf.type_params,
                anf.attrs,
            )
        if isinstance(anf, relay.expr.Let):
            value = anf.value
            if isinstance(value, relay.expr.Call):
                if isinstance(value.op, tvm.ir.Op):
                    if value.op.name == start_name and not start_found:
                        if operator_current_idx == start_name_idx or start_name_idx is None:
                            value = relay.expr.Call(bitpack_start, [value])
                            start_found = True
                    elif value.op.name == stop_name:
                        if operator_current_idx == stop_name_idx or stop_name_idx is None:
                            raise BT()

            operator_current_idx = _operator_idx_inc(value, count_meta, operator_current_idx)

            try:
                return relay.expr.Let(
                    anf.var,
                    value,
                    _recursion(anf.body, start_found, stop_found, operator_current_idx),
                )
            except BT:
                assert start_found
                assert not stop_found
                stop_found = True
                value = relay.expr.Call(bitpack_end, [value])
                return relay.expr.Let(anf.var, value, anf.body)
        else:
            assert start_found
            assert stop_found
            return anf

    annotated = _recursion(anf, False, False, operator_current_idx)
    return run_opt_pass(annotated, transform.ToGraphNormalForm())
def graph_pack(
    expr,
    bfactor,
    cfactor,
    weight_bits,
    start_name="nn.max_pool2d",
    stop_name="nn.global_avg_pool2d",
    start_name_idx=None,
    stop_name_idx=None,
    count_meta=False,
    device_annot=False,
    annot_start_name="nn.conv2d",
    annot_end_name="annotation.stop_fusion",
):
    """Pack the graph into batch & channel packed format.

    Parameters
    ----------
    expr : relay.Expr
        The input program.

    bfactor : int
        The packing factor in batch.

    cfactor : int
        The packing factor in channel.

    weight_bits : int
        The bit-width of the weights.

    start_name : str, optional
        Start packing from a certain known node when start_name_idx is None.

    stop_name : str, optional
        Stop packing at a certain known node when stop_name_idx is None.

    start_name_idx : int, optional
        When start_name_idx is not None, start packing only when the node name
        equals start_name and the node index equals start_name_idx.

    stop_name_idx : int, optional
        When stop_name_idx is not None, stop packing only when the node name
        equals stop_name and the node index equals stop_name_idx.

    count_meta : boolean, optional
        When count_meta is False, the operator-index logic does not count
        constants (nodes of type 'relay.expr.Constant'); start_name_idx and
        stop_name_idx then follow the indexing of
        'expr.astext(show_meta_data=False)'. When count_meta is True,
        constants are counted as well.

    device_annot : boolean, optional
        Whether to annotate the device type.

    annot_start_name : str, optional
        Device annotation start node, from which nodes are marked as `ext_dev`.

    annot_end_name : str, optional
        Device annotation end node, after which nodes are marked as `cpu`.

    Returns
    -------
    expr : Expr
        The transformed expression.
    """
    assert isinstance(expr, relay.Function)
    assert (
        (start_name != stop_name)
        or ((start_name_idx is None) != (stop_name_idx is None))
        or (not (start_name_idx is None and stop_name_idx is None))
        or (start_name_idx < stop_name_idx)
    )
    expr = get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta)
    expr = run_opt_pass(expr, transform.InferType())
    packer = ExprPack(bfactor, cfactor, weight_bits)
    expr = packer.visit(expr)
    assert not packer.start_pack
    expr = run_opt_pass(expr, transform.InferType())

    if device_annot:
        expr_locator = ExprLocator()
        expr_locator.visit(expr)

        annot_start = op.op.get(annot_start_name)
        start = expr_locator.op2nodes[(annot_start, "int32")][0]

        annot_end = op.op.get(annot_end_name)
        # mark the op following the last stop_fusion as running on cpu
        end = expr_locator.op2nodes[(annot_end, "int8")][-1] + 1

        device_annot = ExprDeviceAnnot(start=start, end=end)
        expr = device_annot.visit(expr)
        return run_opt_pass(expr, transform.InferType())

    return expr
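
# Usage sketch (assumes `func` is a quantized relay.Function and a VTA
# environment is active; names are illustrative): the packing factors come
# straight from the environment, the weight width from the quantization.
#
#   from vta import get_env
#
#   env = get_env()
#   packed = graph_pack(
#       func,            # relay.Function produced by quantization
#       env.BATCH,       # bfactor
#       env.BLOCK_OUT,   # cfactor
#       env.WGT_WIDTH,   # weight_bits
#       start_name="nn.max_pool2d",
#       stop_name="nn.global_avg_pool2d",
#   )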
"""Namespace for supporting Relay operators on VTA."""
from __future__ |
import absolute_ |
import as _abs |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi
from tvm.relay.op |
import op as reg
from tvm.relay.op |
import strategy as _strategy
from tvm.relay.op.op |
import OpPattern, OpStrategy
from .utils |
import is_packed_layout
from .vta_conv2d |
import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose |
import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d |
import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense |
import dense_packed, schedule_dense_packed
from ..environment |
import get_env
ENV = get_env()
reg.register_pattern("copy", OpPattern.INJECTIVE, level=15)
def compute_clip_vta(attrs, inputs, output_type):
    """Clip operator."""
    x = inputs[0]
    a_min = attrs.a_min
    a_max = attrs.a_max
    const_min = tvm.tir.const(a_min, x.dtype)
    const_max = tvm.tir.const(a_max, x.dtype)
    with tvm.te.tag_scope(topi.tag.ELEMWISE):
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
    return [x]
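
# The two te.compute stages above realize clip(x, a_min, a_max) as
# max(min(x, a_max), a_min); e.g. with a_min=0, a_max=127 an input of 200
# passes through min(200, 127) = 127, then max(127, 0) = 127.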
def clip_strategy_vta(attrs, inputs, out_type, target):
    strategy = OpStrategy()
    strategy.add_implementation(
        compute_clip_vta,
        _strategy.wrap_topi_schedule(topi.generic.schedule_injective),
        name="clip.vta",
    )
    return strategy


reg.get("clip").get_attr("FTVMStrategy").register(clip_strategy_vta, "vta")


@autotvm.register_topi_compute("add.vta")
def add_packed(cfg, lhs, rhs):
    return topi.add(lhs, rhs)


@autotvm.register_topi_compute("multiply.vta")
def multiply_packed(cfg, lhs, rhs):
    return topi.multiply(lhs, rhs)
def schedule_alu_packed(cfg, outs):
    """alu packed schedule"""
    assert len(outs) == 1

    def is_cast_op(op):
        return op.name == "T_cast"

    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    output = outs[0]
    s = te.create_schedule([x.op for x in outs])
    te.schedule.AutoInlineInjective(s)

    if ENV.TARGET not in ["sim", "tsim", "intelfocl"]:
        return s

    # only offload int ALU ops with the packed 6-D layout to VTA
    if "int" in output.dtype and len(output.shape) == 6:
        ewise_inputs = []
        ewise_ops = []
        const_ops = []

        def _traverse(op):
            if topi.tag.is_broadcast(op.tag):
                if not op.same_as(output.op):
                    if not op.axis:
                        const_ops.append(op)
                    elif not is_cast_op(op):
                        ewise_ops.append(op)

                for tensor in op.input_tensors:
                    if isinstance(tensor.op, tvm.te.PlaceholderOp):
                        ewise_inputs.append((op, tensor))
                    elif is_cast_op(tensor.op) and not op.same_as(output.op):
                        ewise_inputs.append((op, tensor))
                    else:
                        _traverse(tensor.op)
            else:
                for tensor in op.input_tensors:
                    if (not isinstance(tensor.op, tvm.te.PlaceholderOp)) and (
                        not is_cast_op(tensor.op)
                    ):
                        _traverse(tensor.op)

        op = output.op
        _traverse(op)
        for _, t in ewise_inputs:
            if t.dtype == "float32":
                return s

        x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis

        cfg.define_split("tile_co", x_co, num_outputs=2)
        cfg.define_split("tile_h", x_i, num_outputs=2)
        cfg.define_split("tile_w", x_j, num_outputs=2)

        x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
        x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
        x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
        s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
        store_pt = x_j0

        for e_o in ewise_ops:
            s[e_o].set_scope(ENV.acc_scope)
            s[e_o].pragma(s[e_o].op.axis[0], ENV.alu)
            s[e_o].compute_at(s[output], store_pt)

        # cache read input
        cache_read_ewise = []
        for consumer, tensor in ewise_inputs:
            cache_read_ewise.append(s.cache_read(tensor, ENV.acc_scope, [consumer]))

        for tensor in cache_read_ewise:
            if s[tensor].op.axis:
                s[tensor].pragma(s[tensor].op.axis[0], ENV.dma_copy)
            s[tensor].compute_at(s[output], store_pt)

        for op in const_ops:
            s[op].compute_inline()

        s[output].pragma(x_co1, ENV.dma_copy)

    return s
@autotvm.register_topi_schedule("add.vta")
def schedule_add_packed(cfg, outs):
return schedule_alu_packed(cfg, outs)
@autotvm.register_topi_sche |
dule("multiply.vta")
def schedule_multiply_packed(cfg, outs):
return schedule_alu_packed(cfg, outs)
def add_strategy_vta(attrs, inputs, out_type, target):
    strategy = OpStrategy()
    strategy.add_implementation(
        _strategy.wrap_topi_compute(add_packed),
        _strategy.wrap_topi_schedule(schedule_add_packed),
        name="add.vta",
    )
    return strategy


def multiply_strategy_vta(attrs, inputs, out_type, target):
    strategy = OpStrategy()
    strategy.add_implementation(
        _strategy.wrap_topi_compute(multiply_packed),
        _strategy.wrap_topi_schedule(schedule_multiply_packed),
        name="multiply.vta",
    )
    return strategy


if ENV.TARGET in ["sim", "intelfocl"]:
    reg.get("add").get_attr("FTVMStrategy").register(add_strategy_vta, "vta")
    reg.get("multiply").get_attr("FTVMStrategy").register(multiply_strategy_vta, "vta")
@_strategy.conv2d_strategy.register("vta")
def conv2d_strategy_vta(attrs, inputs, out_type, target):
"""conv2d vta strategy"""
strategy = OpStrategy()
kernel = inputs[1]
dilation = topi.utils.get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
assert dilation == (1, 1), "support for dilation limited to (1, 1)"
if is_packed_layout(layout):
if groups == 1:
assert ENV.LOG_INP_WIDTH == 3, "only support 8bit inp for now"
assert ENV.LOG_WGT_WIDTH == 3, "only support 8bit wgt for now"
assert kernel.dtype == "int8"
strategy.add_implementation(
_strategy.wrap_compute_conv2d(conv2d_packed, need_data_layout=True),
_strategy.wrap_topi_schedule(schedule_conv2d_packed),
name="conv2d_packed.vta",
)
else:
strategy.add_implementation(
_strategy.wrap_compute_conv2d(group_conv2d_packed, has_groups=True),
_strategy.wrap_topi_schedule(schedule_group_conv2d_packed),
name="group_conv2d_packed.vta",
)
re |
turn strategy
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.arm_cpu.conv2d_strategy_arm_cpu(attrs, inputs, out_type, arm_tgt)
@_strategy.conv2d_transpose_strategy.register("vta")
def conv2d_transpose_strategy_vta(attrs, inputs, out_type, target):
"""conv2d_transpose vta strategy"""
dilation = topi.utils.get_const_tuple(attrs.dilation)
layout = attrs.data_layout
assert dilation == (1, 1), "support for dilation limited to (1, 1)"
if is_packed_layout(layout):
strategy = OpStrategy()
strategy.add_implementation(
_strategy.wrap_compute_conv2d_transpose(conv2d_transpose_packed),
_strategy.wrap_topi_schedule(schedule_conv2d_transpose_packed),
name="conv2d_transpose_packed.vta",
)
return strategy
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.arm_cpu.conv2d_transpose_strategy_arm_cpu(attrs, inputs, out_type, arm_tgt)
@_strategy.dense_strategy.register("vta")
def dense_strategy_vta(attrs, inputs, out_type, target):
"""dense vta strategy"""
if len(inputs[0].shape) == 4:
strategy = OpStrategy()
strategy.add_implementation(
_strategy.wrap_compute_dense(dense_packed),
_strategy.wrap_topi_schedule(schedule_dense_packed),
name="dense_packed.vta",
)
return strategy
arm_tgt = tvm.target.arm_cpu(target.model)
return _strategy.x86.dense_strategy_cpu(attrs, inputs, out_type, arm_tgt) |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA TOPI Utils."""
def is_packed_layout(layout):
"""Check if layout is packed layout"""
if layout == "NCHW":
return False
if "n" in layout and "c" in layout:
return True
return False
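
# e.g. is_packed_layout("NCHW") -> False, but is_packed_layout("NCHW1n16c")
# -> True, since the packed layout spells out the batch/channel sub-tiles.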
"""Conv2D operator declaration and schedule registration for VTA."""

import numpy as np

import tvm
from tvm import te
from tvm import autotvm
from tvm import topi

from .utils import is_packed_layout
from ..environment import get_env
@autotvm.register_topi_compute("conv2d_packed.vta")
def conv2d_packed(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
"""Packed conv2d function."""
if not is_packed_layout(layout):
raise topi.InvalidShapeError()
assert dilation == (1, 1)
if padding[0]:
pad_data = topi.nn.pad(data, [0, 0, padding[0], padding[1], 0, 0], name="pad_data")
else:
pad_data = data
assert len(data.shape) == 6
assert len(kernel.shape) == 6
oheight = topi.utils.get_const_int((pad_data.shape[2] - kernel.shape[2])
owidth = topi.utils.get_const_int((pad_data.shape[3] - kernel.shape[3])
oshape = (data.shape[0], kernel.shape[0], oheight, owidth, data.shape[4], kernel.shape[4])
ishape = topi.utils.get_const_tuple(data.shape)
kshape = topi.utils.get_const_tuple(kernel.shape)
d_i = te.reduce_axis((0, kshape[2]), name="d_i")
d_j = te.reduce_axis((0, kshape[3]), name="d_j")
k_o = te.reduce_axis((0, ishape[1]), name="k_o")
k_i = te.reduce_axis((0, ishape[-1]), name="k_i")
hstride, wstride = strides
res = te.compute(
oshape,
lambda b_o, c_o, i, j, b_i, c_i: te.sum(
pad_data[b_o, k_o, i * hstride + d_i, j * wstride + d_j, b_i, k_i].astype(out_dtype)
* kernel[c_o, k_o, d_i, d_j, c_i, k_i].astype(out_dtype),
axis=[k_o, d_i, d_j, k_i],
),
name="res",
tag="conv2d_dense",
)
cfg.add_flop(
2
* np.prod(topi.utils.get_const_tuple(oshape))
* kshape[2]
* kshape[3]
* ishape[1]
* ishape[-1]
)
return res
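
# Shape example for the packed compute above (illustrative VTA config with
# BATCH=1, BLOCK_IN=BLOCK_OUT=16): data (1, 16, 14, 14, 1, 16) and kernel
# (32, 16, 3, 3, 16, 16) with strides (1, 1) and padding (1, 1) give
# oheight = owidth = (14 + 2 - 3) // 1 + 1 = 14, hence
# oshape = (1, 32, 14, 14, 1, 16).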
@autotvm.register_topi_schedule("conv2d_packed.vta")
def schedule_conv2d_packed(cfg, outs):
"""Schedule packed conv2d"""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
conv2d_res = []
assert "int" in output.op.input_tensors[0].dtype
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.sa |
me_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "conv2d_dense"
conv2d_res.append(op)
_traverse(output.op)
assert len(conv2d_res) == 1
conv2d_stage = conv2d_res[0].output(0)
s = te.create_schedule(output.op)
b, c_o, x_i, x_j, _, _ = s[conv2d_stage].op.axis
c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
cfg.define_split("tile_b", b, num_outputs=2)
cfg.define_split("tile_h", x_i, num_outputs=2)
cfg.define_split("tile_w", x_j, num_outputs=2)
cfg.define_split("tile_ci", c_i, num_outputs=2)
cfg.define_split("tile_co", c_o, num_outputs=2)
cfg.define_knob("oc_nthread", [1, 2])
cfg.define_knob("h_nthread", [1, 2])
data, kernel = conv2d_stage.op.input_tensors
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
temp = data.op.input_tensors[0]
pad_data = data
data = temp
else:
pad_data = None
env = get_env()
if pad_data is not None:
cdata = pad_data
s[pad_data].set_scope(env.inp_scope)
else:
cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
s[conv2d_stage].set_scope(env.acc_scope)
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
x_co0, x_co1 = cfg["tile_co"].apply(s |
, output, x_co)
x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
store_pt = x_j0
s[conv2d_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
if cfg["oc_nthread"].val > 1:
_, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
if cfg["h_nthread"].val > 1:
_, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
s[conv2d_stage].reorder(x_bo, k_o, x_j, d_j, d_i, x_co, x_i, x_bi, x_ci, k_i)
k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
s[cdata].compute_at(s[conv2d_stage], k_o)
s[ckernel].compute_at(s[conv2d_stage], k_o)
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
s[conv2d_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_co1, env.dma_copy)
return s |
"""Conv2D_transpose operator declaration and schedule registration for VTA.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi
from tvm.topi.utils |
import get_const_tuple
from tvm.topi.nn.utils |
import get_pad_tuple
from ..environment |
import get_env
@autotvm.register_topi_compute("conv2d_transpose_packed.vta")
def conv2d_transpose_packed(cfg, data, kernel, strides, padding, out_dtype, output_padding=(0, 0)):
"""Packed conv2d_transpose compute"""
ishape = get_const_tuple(data.shape)
kshape = get_const_tuple(kernel.shape)
b, c_i, i_h, i_w, t_b, t_ci = ishape
c_o, _, k_h, k_w, t_co, t_ci = kshape
stride_h, stride_w = strides
opad_h, opad_w = output_padding
assert opad_h == 0 and opad_w == 0, "VTA does not support output padding for now"
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (k_h, k_w))
bpad_top = k_h - 1 - fpad_top
bpad_bottom = k_h - 1 - fpad_bottom + opad_h
bpad_left = k_w - 1 - fpad_left
bpad_right = k_w - 1 - fpad_right + opad_w
dilated_input = topi.nn.dilate(data, [1, 1, stride_h, stride_w, 1, 1])
data_pad = topi.nn.pad(
dilated_input, [0, 0, bpad_top, bpad_left, 0, 0], [0, 0, bpad_bottom, bpad_right, 0, 0]
)
out_h = (i_h - 1) * stride_h - fpad_top - fpad_bottom + k_h + opad_h
out_w = (i_w - 1) * stride_w - fpad_left - fpad_right + k_w + opad_w
oshape = (b, c_o, out_h, out_w, t_b, t_co)
d_c = te.reduce_axis((0, c_i), name="d_c")
d_h = te.reduce_axis((0, k_h), name="d_h")
d_w = te.reduce_axis((0, k_w), name="d_w")
d_ci = te.reduce_axis((0, t_ci), name="d_ci")
out = te.compute(
oshape,
lambda i_n, i_c, i_h, i_w, j_n, j_c: te.sum(
data_pad(i_n, d_c, i_h + d_h, i_w + d_w, j_n, d_ci).astype(out_dtype)
* kernel[i_c, d_c, d_h, d_w, j_c, d_ci].astype(out_dtype),
axis=[d_c, d_h, d_w, d_ci],
),
tag="packed_conv2d_transpose",
name="res",
)
cfg.add_flop(
2
* np.prod(topi.utils.get_const_tuple(oshape))
* kshape[2]
* kshape[3]
* ishape[1]
* ishape[-1]
)
return out
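
# Worked example of the back-padding arithmetic above: with k_h = 4,
# stride_h = 2 and fpad_top = fpad_bottom = 1, the stride-dilated input is
# padded by bpad_top = bpad_bottom = 4 - 1 - 1 = 2 rows, and
# out_h = (i_h - 1) * 2 - 1 - 1 + 4 = 2 * i_h (e.g. 7 -> 14), the usual
# transposed-convolution upsampling.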
@autotvm.register_topi_schedule("conv2d_transpose_packed.vta")
def schedule_conv2d_transpose |
_packed(cfg, outs):
"""Schedule packed conv2d_transpose"""
assert len(outs) == 1
output = outs[0]
ewise_inputs = []
ewise_ops = []
conv2d_res = []
assert output.dtype == "int8"
assert output.op.input_tensors[0].dtype == "int32"
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag == "packed_conv2d_transpose"
conv2d_res.append(op)
_traverse(output.op)
assert len(conv2d_res) == 1
conv2d_stage = conv2d_res[0].output(0)
s = te.create_schedule(output.op)
b, c_o, x_i, x_j, _, c_i = s[conv2d_stage].op.axis
c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
cfg.define_split("tile_b", b, num_outputs=2)
cfg.define_split("tile_h", x_i, num_outputs=2)
cfg.define_split("tile_w", x_j, num_outputs=2)
cfg.define_split("tile_ci", c_i, num_outputs=2)
cfg.define_split("tile_co", c_o, num_outputs=2)
cfg.define_knob("oc_nthread", [1, 2])
cfg.define_knob("h_nthread", [1, 2])
data, kernel = conv2d_stage.op.input_tensors
if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
temp = data.op.input_tensors[0]
pad_data = data
data = temp
else:
pad_data = None
env = get_env()
if pad_data is not None:
cdata = pad_data
s[pad_data].set_scope(env.inp_scope)
else:
cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
s[conv2d_stage].set_scope(env.acc_scope)
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
for op |
in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
store_pt = x_j0
s[conv2d_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
if cfg["oc_nthread"].val > 1:
_, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
if cfg["h_nthread"].val > 1:
_, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
x_i, x_ii = s[conv2d_stage].split(x_i, 4)
x_j, x_jj = s[conv2d_stage].split(x_j, 2)
s[conv2d_stage].reorder(x_bo, k_o, x_j, x_co, x_i, x_jj, d_j, d_i, x_ii, x_bi, x_ci, k_i)
for axis in [d_j, d_i, x_ii, x_jj]:
s[conv2d_stage].unroll(axis)
k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
s[cdata].compute_at(s[conv2d_stage], k_o)
s[ckernel].compute_at(s[conv2d_stage], k_o)
s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
s[conv2d_stage].pragma(x_bi, "conv2d_transpose_gemm")
s[output].pragma(x_co1, env.dma_copy)
return s |
"""Dense operator declaration and schedule registration for VTA.""" |
import numpy as np |
import tvm
from tvm |
import te
from tvm |
import autotvm
from tvm |
import topi
from ..environment |
import get_env
def is_packed_layout(layout):
    """Check if layout is packed layout"""
    if layout == "NCHW":
        return False
    if "n" in layout and "c" in layout:
        return True
    return False
@autotvm.register_topi_compute("dense_packed.vta")
def dense_packed(cfg, data, weight, bias=None, out_dtype=None):
"""Dense function declaration."""
if len(data.shape) != 4 or len(weight.shape) != 4:
raise topi.InvalidShapeError()
ishape = topi.utils.get_const_tuple(data.shape)
wshape = topi.utils.get_const_tuple(weight.shape)
oshape = (data.shape[0], weight.shape[0], data.shape[2], weight.shape[2])
assert ishape[1] == wshape[1]
assert ishape[3] == wshape[3]
k_o = te.reduce_axis((0, ishape[1]), name="k_o")
k_i = te.reduce_axis((0, ishape[3]), name="k_i")
res = te.compute(
oshape,
lambda b_o, c_o, b_i, c_i: te.sum(
data[b_o, k_o, b_i, k_i].astype(out_dtype)
* weight[c_o, k_o, c_i, k_i].astype(out_dtype),
axis=[k_o, k_i],
),
name="res",
tag="dense_pack",
)
cfg.add_flop(2 * np.prod(topi.utils.get_const_tuple(oshape)) * ishape[1] * ishape[3])
return res
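
# Shape example for the packed dense above: data packed as (1, 64, 1, 16)
# (batch 1, 1024 input features as 64 x 16) against weight (64, 64, 16, 16)
# (1024 outputs x 1024 inputs) yields oshape (1, 64, 1, 16), reducing over
# k_o (block) and k_i (lane).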
@autotvm.register_topi_schedule("dense_packed.vta")
def schedule_dense_packed(cfg, outs):
"""Packed dense schedule."""
assert len(outs) == 1
output = outs[0]
const_ops = []
ewise_inputs = []
ewise_ops = []
dense_res = []
assert "int" in output.op.input_tensors[0].dtype
def _traverse(op):
if topi.tag.is_broadcast(op.tag):
if not op.same_as(output.op):
if not op.axis:
const_ops.append(op)
else:
ewise_ops.append(op)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.te.PlaceholderOp):
ewise_inputs.append((op, tensor))
else:
_traverse(tensor.op)
else:
assert op.tag = |
= "dense_pack"
dense_res.append(op)
_traverse(output.op)
assert len(dense_res) == 1
dense_stage = dense_res[0].output(0)
s = te.create_schedule(output.op)
b, c_o, _, _ = s[dense_stage].op.axis
c_i, _ = s[dense_stage].op.reduce_axis
cfg.define_split("tile_b", b, num_outputs=2)
cfg.define_split("tile_ci", c_i, num_outputs=2)
cfg.define_split("tile_co", c_o, num_outputs=2)
cfg.define_knob("oc_nthread", [1, 2])
data, weight = dense_stage.op.input_tensors
env = get_env()
cdata = s.cache_read(data, env.inp_scope, [dense_stage])
cweight = s.cache_read(weight, env.wgt_scope, [dense_stage])
s[dense_stage].set_scope(env.acc_scope)
cache_read_ewise = []
for consumer, tensor in ewise_inputs:
cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
for op in ewise_ops:
s[op].set_scope(env.acc_scope)
s[op].pragma(s[op].op.axis[0], env.alu)
for op in const_ops:
s[op].compute_inline()
x_b, x_c, _, _ = s[output].op.axis
x_bo, x_bi = cfg["tile_b"].apply(s, output, x_b)
x_co, x_ci = cfg["tile_co"].apply(s, output, x_c)
s[output].reorder(x_bo, x_co, x_bi, x_ci)
store_pt = x_co
s[dense_stage].compute_at(s[output], store_pt)
for op in ewise_ops:
s[op].compute_at(s[output], store_pt)
for tensor in cache_read_ewise:
s[tensor].compute_at(s[output], store_pt)
s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
if cfg["oc_nthread"].val > 1:
_, v_t = s[output].split(x_co, factor=cfg["oc_nthread"].val)
s[output].reorder(v_t, x_bo)
s[output].bind(v_t, te.thread_axis("cthread"))
x_bo, x_co, x_bi, _ = s[dense_stage].op.axis
k_o, _ = s[dense_stage].op.reduce_axis
s[dense_stage].reorder(x_bo, k_o, x_co)
k_o, _ = cfg["tile_ci"].apply(s, dense_stage, k_o)
s[cdata].compute_at(s[dense_stage], k_o)
s[cweight].compute_at(s[dense_stage], k_o)
s[cdata].pra |
gma(s[cdata].op.axis[0], env.dma_copy)
s[cweight].pragma(s[cweight].op.axis[0], env.dma_copy)
s[dense_stage].tensorize(x_bi, env.gemm)
s[output].pragma(x_ci, env.dma_copy)
return s |