import os
import tvm
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
env = vta.get_env()
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
if env.TARGET == "pynq" or env.TARGET == "de10nano":
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
elif env.TARGET in ["sim", "tsim"]:
remote = rpc.LocalSession()
m = 16
n = 16
o = 1
A = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="A", dtype=env.inp_dtype)
B = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="B", dtype=env.wgt_dtype)
A_buf = te.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: A(*i), "A_buf")
B_buf = te.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: B(*i), "B_buf")
ko = te.reduce_axis((0, n), name="ko")
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
C_buf = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda bo, co, bi, ci: te.sum(
A_buf[bo, ko, bi, ki].astype(env.acc_dtype) * B_buf[co, ko, ci, ki].astype(env.acc_dtype),
axis=[ko, ki],
),
name="C_buf",
)
C = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: C_buf(*i).astype(env.inp_dtype), name="C"
)
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
s[A_buf].set_scope(env.inp_scope)
s[B_buf].set_scope(env.wgt_scope)
s[C_buf].set_scope(env.acc_scope)
s[A_buf].compute_at(s[C_buf], ko)
s[B_buf].compute_at(s[C_buf], ko)
s[A_buf].pragma(s[A_buf].op.axis[0], env.dma_copy)
s[B_buf].pragma(s[B_buf].op.axis[0], env.dma_copy)
s[C].pragma(s[C].op.axis[0], env.dma_copy)
print(tvm.lower(s, [A, B, C], simple_mode=True))
s[C_buf].reorder(
    ko, s[C_buf].op.axis[0], s[C_buf].op.axis[1], s[C_buf].op.axis[2], s[C_buf].op.axis[3], ki
)
s[C_buf].tensorize(s[C_buf].op.axis[2], env.gemm)
print(vta.lower(s, [A, B, C], simple_mode=True))
my_gemm = vta.build(
s, [A, B, C], tvm.target.Target("ext_dev", host=env.target_host), name="my_gemm"
)
temp = utils.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
ctx = remote.ext_dev(0)
A_orig = np.random.randint(-128, 128, size=(o * env.BATCH, n * env.BLOCK_IN)).astype(A.dtype)
B_orig = np.random.randint(-128, 128, size=(m * env.BLOCK_OUT, n * env.BLOCK_IN)).astype(B.dtype)
A_packed = A_orig.reshape(o, env.BATCH, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
B_packed = B_orig.reshape(m, env.BLOCK_OUT, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
A_nd = tvm.nd.array(A_packed, ctx)
B_nd = tvm.nd.array(B_packed, ctx)
C_nd = tvm.nd.array(np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(C.dtype), ctx)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(A_nd, B_nd, C_nd)
C_ref = np.dot(A_orig.astype(env.acc_dtype), B_orig.T.astype(env.acc_dtype)).astype(C.dtype)
C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(C_ref, C_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
print("Successful matrix multiply test!") |
"""
2D Convolution Optimization
===========================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map a 2D convolution
workload efficiently on the VTA design.
We recommend covering the :ref:`vta-mat-mult-opt` tutorial first.
2D convolution is dominant in most computer vision deep neural networks.
In this tutorial, we will demonstrate TVM schedule optimizations to map
2D convolution operators in NCHW layout onto VTA.
We also introduce the notion of latency hiding, which allows us to
maximize VTA's compute and memory resource utilization.
"""
from __future__ import absolute_import, print_function
import os
import tvm
import tvm.testing
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
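# --- Added illustration (not part of the original tutorial) ---
# VTA expects activations packed into a tiled (N//BATCH, C//BLOCK_IN, H, W, BATCH, BLOCK_IN)
# layout. The small helper below is a plain-NumPy sketch of the same reshape/transpose that
# `data_packed` performs later in this tutorial.
def pack_nchw(x, batch_block, channel_block):
    """Pack an NCHW ndarray into VTA's (N//B, C//CB, H, W, B, CB) tiled layout."""
    n, c, h, w = x.shape
    tiled = x.reshape(n // batch_block, batch_block, c // channel_block, channel_block, h, w)
    return tiled.transpose((0, 2, 4, 5, 1, 3))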
env = vta.get_env()
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
if env.TARGET == "pynq":
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
elif env.TARGET in ["sim", "tsim"]:
remote = rpc.LocalSession()
from tvm import topi
batch_size = 1
height = 14
width = 14
in_channels = 256
out_channels = 256
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
data_shape = (
    batch_size // env.BATCH,
    in_channels // env.BLOCK_IN,
    height,
    width,
    env.BATCH,
    env.BLOCK_IN,
)
kernel_shape = (
    out_channels // env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    kernel_h,
    kernel_w,
    env.BLOCK_OUT,
    env.BLOCK_IN,
)
fout_height = (height + 2 * pad_h - kernel_h) // stride_h + 1
fout_width = (width + 2 * pad_w - kernel_w) // stride_w + 1
output_shape = (
    batch_size // env.BATCH,
    out_channels // env.BLOCK_OUT,
    fout_height,
    fout_width,
    env.BATCH,
    env.BLOCK_OUT,
)
dy = te.reduce_axis((0, kernel_h), name="dy")
dx = te.reduce_axis((0, kernel_w), name="dx")
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name="ic")
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name="ic_tns")
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
data_buf = topi.nn.pad(data, [0, 0, pad_h, pad_w, 0, 0], name="data_buf")
kernel_buf = te.compute(kernel_shape, lambda *i: kernel(*i), "kernel_buf")
res_conv = te.compute(
output_shape,
lambda bo, co, i, j, bi, ci: te.sum(
data_buf[bo, ic, i * stride_h + dy, j * stride_w + dx, bi, ic_tns].astype(env.acc_dtype)
* kernel_buf[co, ic, dy, dx, ci, ic_tns].astype(env.acc_dtype),
axis=[ic, dy, dx, ic_tns],
),
name="res_conv",
)
res_shr = te.compute(output_shape, lambda *i: res_conv(*i) >> 8, name="res_shr")
inp_max = (1 << (env.INP_WIDTH - 1)) - 1
res_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), "res_max")
res_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), "res_min")
res = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
s = te.create_schedule(res.op)
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
b_block = 1
oc_block = 128
ic_block = 16
h_block = 7
w_block = 14
b, oc, y, x, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, factor=b_block)
oc_out, oc_inn = s[res].split(oc, factor=oc_block)
y_out, y_inn = s[res].split(y, factor=h_block)
x_out, x_inn = s[res].split(x, factor=w_block)
s[res].reorder(b_out, oc_out, y_out, x_out, b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns)
s[res_conv].compute_at(s[res], x_out)
s[res_shr].compute_at(s[res], x_out)
s[res_max].compute_at(s[res], x_out)
s[res_min].compute_at(s[res], x_out)
b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns = s[res_conv].op.axis
ic_out, ic_inn = s[res_conv].split(ic, factor=ic_block)
s[res_conv].reorder(ic_out, b_inn, oc_inn, y_inn, ic_inn, dy, dx, x_inn, b_tns, oc_tns, ic_tns)
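# Added note: virtual threading below is VTA's latency-hiding mechanism. Splitting the outer
# output-channel loop into two "cthread" virtual threads lets the runtime interleave their load,
# compute, and store phases so DMA transfers overlap with GEMM work.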
v_threads = 2
_, tx = s[res].split(oc_out, factor=v_threads)
s[res].reorder(tx, b_out)
s[res].bind(tx, te.thread_axis("cthread"))
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
s[data_buf].set_scope(env.inp_scope)
s[kernel_buf].set_scope(env.wgt_scope)
s[res_conv].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
s[data_buf].compute_at(s[res_conv], ic_out)
s[kernel_buf].compute_at(s[res_conv], ic_out)
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[kernel_buf].pragma(s[kernel_buf].op.axis[0], env.dma_copy)
s[res].pragma(s[res].op.axis[4], env.dma_copy)
s[res_conv].tensorize(b_tns, env.gemm)
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
print(vta.lower(s, [data, kernel, res], simple_mode=True))
from tvm.topi.testing import conv2d_nchw_python
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
my_conv = vta.build(
s, [data, kernel, res], tvm.target.Target("ext_dev", host=env.target_host), name="my_conv"
)
temp = utils.tempdir()
my_conv.save(temp.relpath("conv2d.o"))
remote.upload(temp.relpath("conv2d.o"))
f = remote.load_module("conv2d.o")
ctx = remote.ext_dev(0)
data_np = np.random.randint(-128, 128, size=(batch_size, in_channels, height, width)).astype(
data.dtype
)
kernel_np = np.random.randint(
-128, 128, size=(out_channels, in_channels, kernel_h, kernel_w)
).astype(kernel.dtype)
data_packed = data_np.reshape(
    batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN, height, width
).transpose((0, 2, 4, 5, 1, 3))
kernel_packed = kernel_np.reshape(
    out_channels // env.BLOCK_OUT,
    env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    env.BLOCK_IN,
    kernel_h,
    kernel_w,
).transpose((0, 2, 4, 5, 1, 3))
data_nd = tvm.nd.array(data_packed, ctx)
kernel_nd = tvm.nd.array(kernel_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(data_nd, kernel_nd, res_nd)
res_ref = conv2d_nchw_python(
data_np.astype(env.acc_dtype),
kernel_np.astype(env.acc_dtype),
(stride_h, stride_w),
(pad_h, pad_w),
).astype(env.acc_dtype)
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
res_ref = res_ref.reshape(
    (
        batch_size // env.BATCH,
        env.BATCH,
        out_channels // env.BLOCK_OUT,
        env.BLOCK_OUT,
        fout_height,
        fout_width,
    )
).transpose((0, 2, 4, 5, 1, 3))
tvm.testing.assert_allclose(res_ref, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
print("Successful 2D convolution test!") |
"""
.. _vta-mat-mult-opt:
Matrix Multiply Blocking
========================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the :ref:`basic-mat-mult` tutorial first.
In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within
limited hardware accelerator resources.
"""
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
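# --- Added illustration (not part of the original tutorial) ---
# The schedule below tiles the large GEMM so that each block of inputs, weights and accumulators
# fits in VTA's on-chip SRAM. This plain-NumPy sketch shows the same blocking idea on the
# unpacked matrices; the block sizes here are illustrative only.
def blocked_matmul_reference(data_np, weight_np, block_i=256, block_o=256):
    # data_np: (batch, in_channels) int8, weight_np: (out_channels, in_channels) int8
    out = np.zeros((data_np.shape[0], weight_np.shape[0]), dtype="int32")
    for o_start in range(0, weight_np.shape[0], block_o):
        for i_start in range(0, data_np.shape[1], block_i):
            d_blk = data_np[:, i_start : i_start + block_i].astype("int32")
            w_blk = weight_np[o_start : o_start + block_o, i_start : i_start + block_i].astype("int32")
            # accumulate the partial product contributed by this (output, reduction) block
            out[:, o_start : o_start + block_o] += d_blk @ w_blk.T
    return out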
env = vta.get_env()
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
if env.TARGET == "pynq":
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
elif env.TARGET in ["sim", "tsim"]:
remote = rpc.LocalSession()
batch_size = 1
in_channels = 1024
out_channels = 1024
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
data_shape = (batch_size // env.BATCH, in_channels // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
weight_shape = (
    out_channels // env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    env.BLOCK_OUT,
    env.BLOCK_IN,
)
output_shape = (batch_size // env.BATCH, out_channels // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT)
num_ops = in_channels * out_channels * batch_size * 2
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name="ic")
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name="ic_tns")
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
weight = te.placeholder(weight_shape, name="weight", dtype=env.wgt_dtype)
data_buf = te.compute(data_shape, lambda *i: data(*i), "data_buf")
weight_buf = te.compute(weight_shape, lambda *i: weight(*i), "weight_buf")
res_gemm = te.compute(
output_shape,
lambda bo, co, bi, ci: te.sum(
data_buf[bo, ic, bi, ic_tns].astype(env.acc_dtype)
* weight_buf[co, ic, ci, ic_tns].astype(env.acc_dtype),
axis=[ic, ic_tns],
),
name="res_gem",
)
res_shr = te.compute(output_shape, lambda *i: res_gemm(*i) >> env.INP_WIDTH, name="res_shr")
inp_max = (1 << (env.INP_WIDTH - 1)) - 1
res_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), "res_max")
res_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), "res_min")
res = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
s = te.create_schedule(res.op)
print(tvm.lower(s, [data, weight, res], simple_mode=True))
b_block = 1
i_block = 256
o_block = 256
b, oc, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, b_block)
oc_out, oc_inn = s[res].split(oc, o_block)
s[res].reorder(b_out, oc_out, b_inn, oc_inn)
s[res_gemm].compute_at(s[res], oc_out)
s[res_shr].compute_at(s[res], oc_out)
s[res_max].compute_at(s[res], oc_out)
s[res_min].compute_at(s[res], oc_out)
b_inn, oc_inn, b_tns, oc_tns = s[res_gemm].op.axis
ic_out, ic_inn = s[res_gemm].split(ic, i_block)
s[res_gemm].reorder(ic_out, b_inn, oc_inn, ic_inn, b_tns, oc_tns, ic_tns)
print(tvm.lower(s, [data, weight, res], simple_mode=True))
s[data_buf].set_scope(env.inp_scope)
s[weight_buf].set_scope(env.wgt_scope)
s[res_gemm].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
s[data_buf].compute_at(s[res_gemm], ic_out)
s[weight_buf].compute_at(s[res_gemm], ic_out)
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[weight_buf].pragma(s[weight_buf].op.axis[0], env.dma_copy)
s[res].pragma(s[res].op.axis[2], env.dma_copy)
s[res_gemm].tensorize(b_tns, env.gemm)
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
print(vta.lower(s, [data, weight, res], simple_mode=True))
my_gemm = vta.build(
s, [data, weight, res], tvm.target.Target("ext_dev", host=env.target_host), name="my_gemm"
)
temp = utils.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
ctx = remote.ext_dev(0)
data_np = np.random.randint(-128, 128, size=(batch_size, in_channels)).astype(data.dtype)
weight_np = np.random.randint(-128, 128, size=(out_channels, in_channels)).astype(weight.dtype)
data_packed = data_np.reshape(
    batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
weight_packed = weight_np.reshape(
    out_channels // env.BLOCK_OUT, env.BLOCK_OUT, in_channels // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
data_nd = tvm.nd.array(data_packed, ctx)
weight_nd = tvm.nd.array(weight_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
if env.TARGET in ["sim", "tsim"]:
simulator.clear_stats()
f(data_nd, weight_nd, res_nd)
res_ref = np.dot(data_np.astype(env.acc_dtype), weight_np.T.astype(env.acc_dtype))
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
res_ref = res_ref.reshape(
    batch_size // env.BATCH, env.BATCH, out_channels // env.BLOCK_OUT, env.BLOCK_OUT
).transpose((0, 2, 1, 3))
np.testing.assert_equal(res_ref, res_nd.numpy())
if env.TARGET in ["sim", "tsim"]:
sim_stats = simulator.stats()
print("Execution statistics:")
for k, v in sim_stats.items():
print("\t{:<16}: {:>16}".format(k, v))
print("Successful blocked matrix multiply test!") |
"""
.. _vta-get-started:
Get Started with VTA
====================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This is an introduction tutorial on how to use TVM to program the VTA design.
In this tutorial, we will demonstrate the basic TVM workflow to implement
a vector addition on the VTA design's vector ALU.
This process includes specific scheduling transformations necessary to lower
computation down to low-level accelerator operations.
To begin, we need to import TVM, which is our deep learning optimizing compiler.
We also need to import the VTA python package, which contains VTA-specific
extensions for TVM to target the VTA design.
"""
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
env = vta.get_env()
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
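# --- Added illustration (not part of the original tutorial) ---
# In plain NumPy terms, the computation we are about to schedule for VTA is an element-wise add
# performed in the wide accumulator dtype, followed by a narrowing cast back to the input dtype.
# With VTA's default configuration acc_dtype is int32 and inp_dtype is int8; other configurations
# may use different widths.
def vadd_reference(a, b):
    # add in the 32-bit accumulator dtype, then narrow the result to the 8-bit input dtype
    return (a.astype("int32") + b.astype("int32")).astype("int8")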
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
if env.TARGET == "pynq" or env.TARGET == "de10nano":
assert tvm.runtime.enabled("rpc")
remote = rpc.connect(host, port)
vta.reconfig_runtime(remote)
vta.program_fpga(remote, bitstream=None)
elif env.TARGET in ("sim", "tsim", "intelfocl"):
remote = rpc.LocalSession()
if env.TARGET in ["intelfocl"]:
vta.program_fpga(remote, bitstream="vta.bitstream")
m = 64
o = 1
A = te.placeholder((o, m, env.BATCH, env.BLOCK_OUT), name="A", dtype=env.acc_dtype)
B = te.placeholder((o, m, env.BATCH, env.BLOCK_OUT), name="B", dtype=env.acc_dtype)
A_buf = te.compute((o, m, env.BATCH, env.BLOCK_OUT), lambda *i: A(*i), "A_buf")
B_buf = te.compute((o, m, env.BATCH, env.BLOCK_OUT), lambda *i: B(*i), "B_buf")
C_buf = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT),
lambda *i: A_buf(*i).astype(env.acc_dtype) + B_buf(*i).astype(env.acc_dtype),
name="C_buf",
)
C = te.compute(
(o, m, env.BATCH, env.BLOCK_OUT), lambda *i: C_buf(*i).astype(env.inp_dtype), name="C"
)
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
s[A_buf].set_scope(env.acc_scope)
s[B_buf].set_scope(env.acc_scope)
s[C_buf].set_scope(env.acc_scope)
s[A_buf].pragma(s[A_buf].op.axis[0], env.dma_copy)
s[B_buf].pragma(s[B_buf].op.axis[0], env.dma_copy)
s[C].pragma(s[C].op.axis[0], env.dma_copy)
s[C_buf].pragma(C_buf.op.axis[0], env.alu)
print(vta.lower(s, [A, B, C], simple_mode=True))
my_vadd = vta.build(
s, [A, B, C], tvm.target.Target("ext_dev", host=env.target_host), name="my_vadd"
)
temp = utils.tempdir()
my_vadd.save(temp.relpath("vadd.o"))
remote.upload(temp.relpath("vadd.o"))
f = remote.load_module("vadd.o")
ctx = remote.ext_dev(0)
A_orig = np.random.randint(-128, 128, size=(o * env.BATCH, m * env.BLOCK_OUT)).astype(A.dtype)
B_orig = np.random.randint(-128, 128, size=(o * env.BATCH, m * env.BLOCK_OUT)).astype(B.dtype)
A_packed = A_orig.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
B_packed = B_orig.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
A_nd = tvm.nd.array(A_packed, ctx)
B_nd = tvm.nd.array(B_packed, ctx)
C_nd = tvm.nd.array(np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(C.dtype), ctx)
f(A_nd, B_nd, C_nd)
C_ref = (A_orig.astype(env.acc_dtype) + B_orig.astype(env.acc_dtype)).astype(C.dtype)
C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(C_ref, C_nd.numpy())
print("Successful vector add test!") |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Example code to start the runtime.
*/
const path = require("path");
const fs = require("fs");
const tvmjs = require("../../dist");
const wasmPath = tvmjs.wasmPath();
const EmccWASI = require(path.join(wasmPath, "tvmjs_runtime.wasi.js"));
const wasmSource = fs.readFileSync(path.join(wasmPath, "tvmjs_runtime.wasm"));
// Here we pass the JavaScript module generated by emscripten as the
// LibraryProvider to supply the WASI-related libraries, and use
// the async version of the instantiation API.
tvmjs.instantiate(wasmSource, new EmccWASI())
.then((tvm) => {
const log_info = tvm.getGlobalFunc("testing.log_info_str");
log_info("hello world");
// List all the global functions from the runtime.
console.log("Runtime functions using EmccWASI\n", tvm.listGlobalFuncNames());
});
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Example code to start the runtime.
*/
const { WASI } = require('wasi');
const path = require("path");
const fs = require("fs");
const tvmjs = require("../../dist");
const wasmPath = tvmjs.wasmPath();
const wasmSource = fs.readFileSync(path.join(wasmPath, "tvmjs_runtime.wasm"));
const wasi = new WASI({ args: process.argv, env: process.env });
// Here we pass the WASI object from Node.js as the
// LibraryProvider to supply the WASI-related libraries.
const tvm = new tvmjs.Instance(new WebAssembly.Module(wasmSource), wasi);
// List all the global functions from the runtime.
console.log("Runtime using WASI\n", tvm.listGlobalFuncNames());
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Example code to start the RPC server on nodejs using WASI
*/
const { WASI } = require("wasi");
const tvmjs = require("../../dist");
// getImports returns a fresh WASI library instance on each call.
const getImports = () => {
return new WASI({
args: process.argv,
env: process.env
});
};
const proxyUrl = "ws://localhost:8888/ws";
new tvmjs.RPCServer(proxyUrl, "wasm", getImports, console.log);
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Decorate emcc generated js to a WASI compatible API."""
import sys
template_head = """
function EmccWASI() {
"""
template_tail = """
this.Module = Module;
this.start = Module.wasmLibraryProvider.start;
this.imports = Module.wasmLibraryProvider.imports;
this.wasiImport = this.imports["wasi_snapshot_preview1"];
}
if (typeof module !== "undefined" && module.exports) {
module.exports = EmccWASI;
}
"""
if __name__ == "__main__":
if len(sys.argv) != 3:
    print("Usage: <file-in> <file-out>")
    sys.exit(1)
result = template_head + open(sys.argv[1]).read() + template_tail
with open(sys.argv[2], "w") as fo:
fo.write(result)
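# Usage sketch (added; the script and input file names are illustrative and depend on your build):
#   python decorate_as_wasi.py build/tvmjs_runtime.js dist/wasm/tvmjs_runtime.wasi.js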
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* eslint-disable no-unused-vars */
/**
* JS config used by --pre-js in emcc.
* Wrap module as a LibraryProvider.
*/
var __wasmLib = {};
function __wasmLibInstantiateWasm(imports, successCallback) {
__wasmLib.imports = imports;
__wasmLib.successCallback = successCallback;
}
function __wasmLibStart(wasmInstance) {
__wasmLib.successCallback(wasmInstance);
}
__wasmLib.start = __wasmLibStart;
var Module = {
"instantiateWasm": __wasmLibInstantiateWasm,
"wasmLibraryProvider": __wasmLib
};
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* eslint-disable no-undef */
module.exports = {
testEnvironment: "node",
testMatch: [
"**/tests/node/*.js"
],
};
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import commonjs from '@rollup/plugin-commonjs';
import resolve from '@rollup/plugin-node-resolve';
export default {
input: 'dist/index.js',
output: {
file: 'dist/tvmjs.bundle.js',
format: 'umd',
name: 'tvmjs',
exports: 'named',
globals: {'ws': 'ws',
'perf_hooks': 'perf_hooks',
'@webgpu/types': 'webgputypes'}
},
plugins: [commonjs(), resolve()],
external: ['ws', 'perf_hooks', '@webgpu/types']
};
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* eslint-disable no-undef */
// Load Emscripten Module, need to change path to root/lib
const path = require("path");
const fs = require("fs");
const assert = require("assert");
const tvmjs = require("../../dist");
const wasmPath = tvmjs.wasmPath();
const EmccWASI = require(path.join(wasmPath, "tvmjs_runtime.wasi.js"));
const wasmSource = fs.readFileSync(path.join(wasmPath, "test_addone.wasm"));
const tvm = new tvmjs.Instance(
new WebAssembly.Module(wasmSource),
new EmccWASI()
);
// Load system library
const sysLib = tvm.systemLib();
function randomArray(length, max) {
return Array.apply(null, Array(length)).map(function () {
return Math.random() * max;
});
}
test("add one", () => {
// grab pre-loaded function
const faddOne = sysLib.getFunction("add_one");
assert(tvm.isPackedFunc(faddOne));
const n = 124;
const A = tvm.empty(n).copyFrom(randomArray(n, 1));
const B = tvm.empty(n);
// call the function.
faddOne(A, B);
const AA = A.toArray(); // retrieve values in js array
const BB = B.toArray(); // retrieve values in js array
// verify
for (var i = 0; i < BB.length; ++i) {
assert(Math.abs(BB[i] - (AA[i] + 1)) < 1e-5);
}
faddOne.dispose();
});
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* eslint-disable no-undef */
const path = require("path");
const fs = require("fs");
const assert = require("assert");
const tvmjs = require("../../dist/tvmjs.bundle")
const wasmPath = tvmjs.wasmPath();
const EmccWASI = require(path.join(wasmPath, "tvmjs_runtime.wasi.js"));
const wasmSource = fs.readFileSync(path.join(wasmPath, "tvmjs_runtime.wasm"));
let tvm = new tvmjs.Instance(new WebAssembly.Module(wasmSource), new EmccWASI());
// Basic fields.
assert(tvm.listGlobalFuncNames() !== undefined);
// Test ndarray
function testArrayCopy(dtype, arrayType) {
let data = [1, 2, 3, 4, 5, 6];
let a = tvm.empty([2, 3], dtype).copyFrom(data);
assert(a.device.toString() == "cpu(0)");
assert(a.shape[0] == 2 && a.shape[1] == 3);
let ret = a.toArray();
assert(ret instanceof arrayType);
assert(ret.toString() == arrayType.from(data).toString());
// test multiple dispose.
a.dispose();
a.dispose();
}
test("array copy", () => {
testArrayCopy("float32", Float32Array);
testArrayCopy("int", Int32Array);
testArrayCopy("int8", Int8Array);
testArrayCopy("uint8", Uint8Array);
testArrayCopy("float64", Float64Array);
});
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
 * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* eslint-disable no-undef */
const path = require("path");
const fs = require("fs");
const assert = require("assert");
const tvmjs = require("../../dist");
const wasmPath = tvmjs.wasmPath();
const EmccWASI = require(path.join(wasmPath, "tvmjs_runtime.wasi.js"));
const wasmSource = fs.readFileSync(path.join(wasmPath, "tvmjs_runtime.wasm"));
let tvm = new tvmjs.Instance(
new WebAssembly.Module(wasmSource),
new EmccWASI()
);
test("GetGlobal", () => {
let flist = tvm.listGlobalFuncNames();
let faddOne = tvm.getGlobalFunc("testing.add_one");
let fecho = tvm.getGlobalFunc("testing.echo");
assert(faddOne(tvm.scalar(1, "int")) == 2);
assert(fecho(1123) == 1123);
assert(fecho("xyz") == "xyz");
let bytes = new Uint8Array([1, 2, 3]);
let rbytes = fecho(bytes);
assert(rbytes.length == bytes.length);
for (let i = 0; i < bytes.length; ++i) {
assert(rbytes[i] == bytes[i]);
}
assert(fecho(undefined) == undefined);
let arr = tvm.empty([2, 2]).copyFrom([1, 2, 3, 4]);
let arr2 = fecho(arr);
assert(arr.handle == arr2.handle);
assert(arr2.toArray().toString() == arr.toArray().toString());
let mod = tvm.systemLib();
let ret = fecho(mod);
assert(ret.handle == mod.handle);
assert(flist.length != 0);
mod.dispose();
ret.dispose();
arr.dispose();
arr2.dispose();
fecho.dispose();
faddOne.dispose();
});
test("ReturnFunc", () => { |
function addy(y) { |
function add(x, z) {
return x + y + z;
}
return add;
}
let fecho = tvm.getGlobalFunc("testing.echo");
let myf = tvm.toPackedFunc(addy);
assert(tvm.isPackedFunc(myf));
let myf2 = tvm.toPackedFunc(myf);
assert(myf2._tvmPackedCell.handle === myf._tvmPackedCell.handle);
let f = myf(10);
assert(tvm.isPackedFunc(f));
assert(f(11, 0) == 21);
assert(f("x", 1) == "x101");
assert(f("x", "yz") == "x10yz");
fecho.dispose();
myf.dispose();
myf2.dispose();
f.dispose();
f.dispose();
});
test("RegisterGlobal", () => {
tvm.registerFunc("xyz", function (x, y) {
return x + y;
});
let f = tvm.getGlobalFunc("xyz");
assert(f(1, 2) == 3);
f.dispose();
let syslib = tvm.systemLib();
syslib.dispose();
});
test("NDArrayCbArg", () => {
let use_count = tvm.getGlobalFunc("testing.object_use_count");
let fcheck = tvm.toPackedFunc(function (x) {
assert(use_count(x) == 2);
x.dispose();
});
let x = tvm.empty([2], "float32").copyFrom([1, 2]);
assert(use_count(x) == 1);
fcheck(x);
assert(use_count(x) == 1);
});
test("Logging", () => {
const log_info = tvm.getGlobalFunc("testing.log_info_str");
log_info("helow world")
log_info.dispose();
});
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Prepare test library for standalone wasm runtime test.
import tvm
from tvm import te
from tvm.contrib import emcc
from tvm.relay.backend import Runtime
import os
def prepare_test_libs(base_path):
runtime = Runtime("cpp", {"system-lib": True})
target = "llvm -mtriple=wasm32-unknown-unknown-wasm"
if not tvm.runtime.enabled(target):
raise RuntimeError("Target %s is not enabled" % target)
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
fadd = tvm.build(s, [A, B], target, runtime=runtime, name="add_one")
wasm_path = os.path.join(base_path, "test_addone.wasm")
fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
if __name__ == "__main__":
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
prepare_test_libs(os.path.join(curr_path, "../../dist/wasm"))
"""Simple testcode to test Javascript RPC
To use it, start a rpc proxy with "python -m tvm.exec.rpc_proxy".
Connect javascript end to the websocket port and connect to the RPC.
""" |
import tvm
from tvm import te
from tvm import rpc
from tvm.contrib import utils, emcc
from tvm.relay.backend import Runtime
import numpy as np
proxy_host = "127.0.0.1"
proxy_port = 9090
def test_rpc():
if not tvm.runtime.enabled("rpc"):
return
target = tvm.target.Target("webgpu", host="llvm -mtriple=wasm32-unknown-unknown-wasm")
runtime = Runtime("cpp", {"system-lib": True})
if not tvm.runtime.enabled(str(target.host)):
    raise RuntimeError("Target %s is not enabled" % target.host)
n = 2048
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
num_thread = 2
xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
s[B].bind(xi, te.thread_axis("threadIdx.x"))
s[B].bind(xo, te.thread_axis("blockIdx.x"))
fadd = tvm.build(s, [A, B], target, runtime=runtime, name="addone")
temp = utils.tempdir()
wasm_path = temp.relpath("addone_gpu.wasm")
fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
wasm_binary = open(wasm_path, "rb").read()
remote = rpc.connect(
proxy_host,
proxy_port,
key="wasm",
session_constructor_args=["rpc.WasmSession", wasm_binary],
)
def check(remote):
dev = remote.webgpu(0)
adata = np.random.uniform(size=n).astype(A.dtype)
a = tvm.nd.array(adata, dev)
b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
np.testing.assert_equal(a.numpy(), adata)
f1 = remote.system_lib()
addone = f1.get_function("addone")
addone(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
print("Test pass..")
check(remote)
test_rpc()
"""Simple testcode to test Javascript RPC
To use it, start a rpc proxy with "python -m tvm.exec.rpc_proxy".
Connect javascript end to the websocket port and connect to the RPC.
""" |
import tvm
from tvm import te
from tvm import rpc
from tvm.contrib import utils, emcc
from tvm.relay.backend import Runtime
import numpy as np
proxy_host = "127.0.0.1"
proxy_port = 9090
def test_rpc():
if not tvm.runtime.enabled("rpc"):
return
runtime = Runtime("cpp", {"system-lib": True})
target = "llvm -mtriple=wasm32-unknown-unknown-wasm"
if not tvm.runtime.enabled(target):
raise RuntimeError("Target %s is not enabled" % target)
n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
fadd = tvm.build(s, [A, B], target, runtime=runtime, name="addone")
temp = utils.tempdir()
wasm_path = temp.relpath("addone.wasm")
fadd.export_library(wasm_path, emcc.create_tvmjs_wasm)
wasm_binary = open(wasm_path, "rb").read()
remote = rpc.connect(
proxy_host,
proxy_port,
key="wasm",
session_constructor_args=["rpc.WasmSession", wasm_binary],
)
def check(remote):
faddone = remote.get_function("testing.asyncAddOne")
fecho = remote.get_function("testing.echo")
assert faddone(100) == 101
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
f1 = remote.system_lib()
dev = remote.cpu(0)
a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
addone = f1.get_function("addone")
addone(a, b)
time_f = f1.time_evaluator("addone", dev, number=100, repeat=10)
time_f(a, b)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
check(remote)
test_rpc()
use methods::{PREDICTION_ELF, PREDICTION_ID};
use risc0_zkvm::Prover;
use risc0_zkvm::serde::{from_slice, to_vec};
use std::io;
fn main() {
// Make the prover.
let mut prover =
Prover::new(PREDICTION_ELF).expect("Prover should be constructed from valid ELF binary");
// TODO: Implement communication with the guest here
println!("Please input the sepal length, sepal width, petal length, petal width.");
let mut input = String::new();
io::stdin().read_line(&mut input).unwrap();
let mut s = input.split_whitespace();
let sepal_length: u32 = s.next().unwrap().parse().unwrap();
let sepal_width: u32 = s.next().unwrap().parse().unwrap();
let petal_length: u32 = s.next().unwrap().parse().unwrap();
let petal_width :u32 = s.next().unwrap().parse().unwrap();
prover.add_input_u32_slice(&to_vec(&sepal_length).unwrap());
prover.add_input_u32_slice(&to_vec(&sepal_width).unwrap());
prover.add_input_u32_slice(&to_vec(&petal_length).unwrap());
prover.add_input_u32_slice(&to_vec(&petal_width).unwrap());
// Run prover & generate receipt
let receipt = prover.run().expect(
"Code should be provable unless it had an error or exceeded the maximum cycle limit",
);
// TODO: Implement code for transmitting or serializing the receipt for
// other parties to verify here
// Optional: Verify receipt to confirm that recipients will also be able to
// verify your receipt
receipt.verify(&PREDICTION_ID).expect(
"Code you have proven should successfully verify; did you specify the correct method ID?",
);
// Extract journal of receipt
let c: u32 = from_slice(&receipt.journal).unwrap();
let dic = ["setosa", "versicolor", "virginica"];
// Print an assertion
println!("This is the {} flower, and I can prove it!", dic[c as usize]);
}
fn main() {
risc0_build::embed_methods();
}
// TODO: Rename this file to change the name of this method from METHOD_NAME
#![no_main]
// If you want to try std support, also update the guest Cargo.toml file
#![no_std] // std support is experimental
use risc0_zkvm::guest::env;
risc0_zkvm::guest::entry!(main);
pub fn main() {
// TODO: Implement your guest code here
let sepal_length: u32 = env::read();
let sepal_width: u32 = env::read();
let petal_length: u32 = env::read();
let petal_width: u32 = env::read();
let prediction: u32 = predict(sepal_length, sepal_width, petal_length, petal_width);
env::commit(&prediction);
}
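// Added note: the decision tree below operates on fixed-point features. The thresholds look like
// the classic iris-tree splits scaled by 100 (e.g. `petal_width <= 80` ~ 0.80 cm), and the returned
// class index maps to the `dic = ["setosa", "versicolor", "virginica"]` table in the host program.
// This reading is an inference from the values, not something stated in the source.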
fn predict(sepal_length: u32, sepal_width: u32, petal_length: u32, petal_width: u32) -> u32 {
    if petal_width <= 80 {
        return 0;
    }
    if petal_width <= 175 {
        if petal_length <= 495 {
            if petal_width <= 165 {
                return 1;
            }
            return 2;
        }
        if petal_width <= 155 {
            return 2;
        }
        if sepal_length <= 695 {
            return 1;
        }
        return 2;
    }
    if petal_length <= 485 {
        if sepal_length <= 595 {
            return 1;
        }
        return 2;
    }
    2
}
include!(concat!(env!("OUT_DIR"), "/methods.rs"));
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"
"This data sets consists of 3 different types of irises'\n",
"(Setosa, Versicolour, and Virginica) petal and sepal\n",
"length, stored in a 150x4 numpy.ndarray\n",
"\n",
"The rows being the samples and the columns being:\n",
"Sepal Length, Sepal Width, Petal Length and Petal Width.\n",
"\n",
"The below plot uses the first two features.\n",
"See [here](https:
"information on this dataset.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAApMAAAH1CAYAAACqU3UnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACgpElEQVR4nOzdd1RU1xYH4N80ptBBuqDYBVHB3ns3FmLsvcSSqtFE00w1xjRNjL2XmFhjYg323ntFESuKdBiYPvv9gfAcZ0ZxGBjR/a3lei/3cNj7DnDZ3LlnHwERERhjjDHGGLOB0NEJMMYYY4yxkouLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzLiYZY4wxxpjNxI4IajQakZCQAFdXVwgEAkekwBhjjDHGnoKIkJWVhcDAQAiF1u8/OqSYTEhIQHBwsCNCM8YYY4yx53Dnzh2ULl3a6rhDiklXV1cAucm5ubk5IgXGGGOMMfYUmZmZCA4Ozq/brHFIMZn31rabmxsXk4wxxhhjL7BnPZLIC3AYY4wxxpjNuJhkjDHGGGM242KSMcYYY4zZjItJxhhjjDFmMy4mGWOMMcaYzbiYZIwxxhhjNuNikjHGGGOM2YyLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzLiYZY4wxxpjNuJhkjDHGGGM242KSMcYYY4zZjItJxhhjjDFmMy4mGWOMMcaYzbiYZIwxxhhjNuNikjHGGGOM2YyLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzLiYZY4wxxpjNuJhkjDHGGGM242KSMcYYY4zZjItJxhhjjDFmMy4mGWOMMcaYzbiYZIwxxhhjNuNikjHGGGOM2YyLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzLiYZY4wxxpjNuJhkjDHGGGM242KSMcYYY4zZjItJxhhjjDFmMy4 |
mGWOMMcaYzbiYZIwxxhhjNuNikjHGGGOM2YyLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzLiYZY4wxxpjNuJhkjDHGGGM242KSMcYYY4zZjItJxhhjjDFmMy4mGWOMMcaYzbiYZIwxxhhjNuNikjHGGGOM2YyLScYYY4wxZjMuJhljjDHGmM24mGSMMcYYYzbjYpIxxhhjjNmMi0nGGGOMMWYzsaMTYIy9GjQaDTZt2oS4uDh4enqiW7du8PHxcXRajDHGComLScZYkVu/fj1Gv/kmHqakwF0igVKvx9tvvYX3x47FlClTIBKJHJ0iY4wxG3ExyRgrUv/99x/eeOMNtJPKMMHHD5UkEqQZDViSnY0ff/gBRqMRP/zwg6PTZIwxZiMBEVFxB83MzIS7uzsyMjLg5uZW3OEZY8Wofp06EFy4gL88vSESCEzGfs3KxC+qHNy5exd+fn4OypAxxpglBa3XeAEOY6zI3LhxA0dPnMBQubNZIQkAA51dIDAasW7dOgdkxxhjzB64mGSMFZmUlBQAQIjY8jORHkIh3MWS/I9jjDFW8nAxyRgrMiEhIRAKhTij1Vocv63XI1mrQWhoaDFnxhhjzF64mGSMFRk/Pz+81qkT5qpVSDcaTcaICD8pM+Hu6oro6GgHZcgYY6ywuJhkjBWp73/4ARkyGbqkpWBVdjZidTrsUqvQPy0V63JyMGPmTCgUCkenyRhjzEZcTDLGilTlypVx8MgRVG3ZEh9mpqNlUiIGpqYgNbQsNmzYgIEDBzo6RcYYY4XAfSYZY0WuSpUq2LRlC+7du4ebN2/Cw8MDYWFhEFhY4c0YY6xk4WKSMVZsgoKCEBQU5Og0GGOM2RG/zc0YY4wxxmzGxSRjjDHGGLMZF5OMMcYYY8xmXEwyxhhjjDGbcTHJGGOMMcZsxsUkY4wxxhizGReTjDHGGGPMZlxMMsYYY4wxm3ExyRhjjDHGbMbFJGOMMcYYsxkXk4wxxhhjzGZcTDLGGGOMMZtxMckYY4wxxmzGxSRjjDHGGLMZF5OMMcYYY8xmYkcnwBhzrOvXr+PatWtwd3dHvXr1IBKJHJ0SY4yxEoTvTDL2irp06RJaNW+OihUromPHjmjUqBFCg4OxYMECR6fGGGOsBOE7k4y9gmJjY9GkYUN4q9X4zcML9aROuG8wYHF6BkaMGIHMzEyMGzfO0WkyxhgrAQRERMUdNDMzE+7u7sjIyICbm1txh2fsldfzjTdw7N9/scXTG+5C0zcovsxIx3KdFvfu34eXl5eDMmSMMeZoBa3X+G1uxl4xaWlp2LBhA4ZJ5WaFJAC87eIKg16PVatWOSA7xhhjJQ0Xk4y9Yh48eAC9wYBqEonFcW+RCIFOTrh9+3YxZ8YYY6wk4mKSsVeMt7c3AOCGQW9xPMtoxEOdDr6+vsWZFmOMsRKKi0nGXjG+vr5o17o1FqpU0Fh4ZHppthJaIvTu3dsB2THGGCtpuJhk7BX09ZQpiCcj+qel4rhGAyLCA4MBUzMz8H1WJt4fOxZBQUGOTpMxxlgJwMUkY6+gOnXqYOv27Ujy90P3lCSUeZCA2on3sUivwyeffopp06Y5OkXGGGMlBPeZZOwV1bx5c1yNi8OePXsQGxsLNzc3dOrUCe7u7o5OjTHGWAnCfSYZY4wxxpgZ7jPJGGOMMcaKHBeTjDHGGGPMZvzMJGNFRKfTYePGjVi3bh2ys7MRHh6OESNGoFy5co5OjTHGGLMbvjPJWBG4e/cuIqtXxxtvvIErf29ETkwM5vz0EypUqICff/7Z0ekxxhhjdsN3JhmzM6PRiC6dOiH9Rjy2lvJFhJMTAEBlNOKnrEx88MEHKFeuHLp16+bYRBljjDE74DuTjNnZjh07cPrcOcxwdcsvJAFALhTiEzd3NJTJ8cPUqQ7MkDHGGLMfLiYZs7PNmzejjEyGeo8VknkEAgF6ymQ4dPQo0tLSHJAdY4wxZl9cTDJmZ1qtFi4CAQQCgcVxF4Ew/+MYY4yxko6LScbsrFatWrisVuO |
uXm9xfIdGjeCAAPj4+BRzZowxxpj9cTHJmJ316dMHbi4u+CQrE5onNpg6qFFjvVqFUW+/DaGQf/wYY4yVfLyamzE7c3Z2xqrVq9GtSxe0SE3GG05S+AiF2K/VYptahZatWmH8+PGOTpMxxhizC741wlgRaN++PY6dOIEWvXphrkGPSZkZuBkSjOm
"text/plain": [
"<Figure size 800x600 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAeIAAAH4CAYAAACWpO5eAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOydd3hUVfrHv3d6S++9k4SQHhJCLwpiReyK3dV11bWvru6qq67uriu67v7UVVaxYFfEBiLSIYWQRgiQhJBAIL1Pb/f3B97rTDKTuVOSGeB8nocHmEzuPXPvnfM973veQtE0TYNAIBAIBIJX4Hl7AAQCgUAgnMsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISYQCAQCwYsQISa4TVtbGyiKwtq1a709lHEsXLgQM2bM8PYwzkoWLlyIhQsXsv/3xefAF8dEIIyFCDHBIWvXrgVFUTb/PP7445NyzhdeeAFff/01p/eeOnUKzzzzDGpraydlLFNFYmKi1bUNDw/HvHnzsH79epvvX79+PZYvX47Q0FCIRCJER0fj6quvxtatW22+/4cffgBFUYiOjobZbJ7MjzLlfPTRR3j11Ve9PQwCwSUE3h4A4czh2WefRVJSktVrM2bMQEJCAjQaDYRCocfO9cILL+DKK6/EihUrHL731KlT+Mtf/oLExETk5eV5bAzeIC8vDw8
"text/plain": [
"<Figure size 800x600 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"
"
"
"\n",
" |
import matplotlib.pyplot as plt\n",
"\n",
"
" |
import mpl_toolkits.mplot3d
"\n",
"from sklearn |
import datasets\n",
"from sklearn.decomposition |
import PCA\n",
"\n",
"
"iris = datasets.load_iris()\n",
"X = iris.data[:, :2]
"y = iris.target\n",
"\n",
"x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\n",
"y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\n",
"\n",
"plt.figure(2, figsize=(8, 6))\n",
"plt.clf()\n",
"\n",
"
"plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1, edgecolor=\"k\")\n",
"plt.xlabel(\"Sepal length\")\n",
"plt.ylabel(\"Sepal width\")\n",
"\n",
"plt.xlim(x_min, x_max)\n",
"plt.ylim(y_min, y_max)\n",
"plt.xticks(())\n",
"plt.yticks(())\n",
"\n",
"
"
"fig = plt.figure(1, figsize=(8, 6))\n",
"ax = fig.add_subplot(111, projection=\"3d\", elev=-150, azim=110)\n",
"\n",
"X_reduced = PCA(n_components=3).fit_transform(iris.data)\n",
"ax.scatter(\n",
" X_reduced[:, 0],\n",
" X_reduced[:, 1],\n",
" X_reduced[:, 2],\n",
" c=y,\n",
" cmap=plt.cm.Set1,\n",
" edgecolor=\"k\",\n",
" s=40,\n",
")\n",
"\n",
"ax.set_title(\"First three PCA directions\")\n",
"ax.set_xlabel(\"1st eigenvector\")\n",
"ax.xaxis.set_ticklabels([])\n",
"ax.set_ylabel(\"2nd eigenvector\")\n",
"ax.yaxis.set_ticklabels([])\n",
"ax.set_zlabel(\"3rd eigenvector\")\n",
"ax.zaxis.set_ticklabels([])\n",
"\n",
"plt.savefig(\"iris_dataset.png\")\n",
"plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3", |
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
{"cells":[{"cell_type":"code","execution_count":44,"metadata":{"collapsed":false,"jupyter":{"outputs_hidden":false},"trusted":true},"outputs":[],"source":["%matplotlib inline"]},{"cell_type":"markdown","metadata":{},"source":["\n"," |
import argparse
import ast
from typing import Literal, Union
import msgpack
import numpy as np
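# CircuitConfig bookkeeping (added comment): it hands out monotonically increasing tensor indices
# while the backward-pass transcript is assembled, tracks which index holds the gradient of each
# forward output, allocates the one-hot label tensor, and records which tensor carries the weight update.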
class CircuitConfig():
def __init__(self, starting_index):
self.next_index = starting_index
self.outp_to_grad = {}
self.label_tensor_idx = None
self.weights_update = None
def new_gradient_tensor(self, tensor_idx):
if tensor_idx in self.outp_to_grad:
raise Exception("Tensor already allocated")
self.outp_to_grad[tensor_idx] = self.next_index
self.next_index += 1
return self.outp_to_grad[tensor_idx]
def new_tensor(self):
new_index = self.next_index
self.next_index += 1
return new_index
def new_label_tensor(self):
if self.label_tensor_idx is not None:
raise Exception("Label tensor already allocated")
self.label_tensor_idx = self.next_index
self.next_index += 1
return self.label_tensor_idx
def gradient_tensor_idx(self, tensor_idx):
return self.outp_to_grad[tensor_idx]
NO_ACTIVATION = 0
SAME = 0
VALID = 1
CONV2D = 0
CONV2D_DEPTHWISE = 1
class Conv2D():
def __init__(self, layer):
params = layer['params']
self.padding = params[1]
self.activation_type = params[2]
self.stride_h = params[3]
self.stride_w = params[4]
def backward(self, layer, transcript, config):
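# Added comment: the records appended below express the standard conv2d backward pass with
# forward-style layers. dW comes from convolving the (permuted) inputs with the (permuted) output
# gradients, dX from convolving the zero-padded output gradients with the spatially rotated
# (180-degree) kernel, and the final Update record applies the weight gradient to the weights.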
inputs_idx, inputs_shape = layer['inp_idxes'][0], layer['inp_shapes'][0]
weights_idx, weights_shape = layer['inp_idxes'][1], layer['inp_shapes'][1]
bias_idx, bias_shape = layer['inp_idxes'][2], layer['inp_shapes'][2]
output_idx, output_shape = layer['out_idxes'][0], layer['out_shapes'][0]
permuted_inputs_idx = config.new_tensor()
permutation = [3, 1, 2, 0]
permuted_inputs_shape = [inputs_shape[p] for p in permutation]
inputs_permute_layer = {
'layer_type': 'Permute',
'params': permutation,
'inp_idxes': [inputs_idx],
'out_idxes': [permuted_inputs_idx],
'inp_shapes': [inputs_shape],
'out_shapes': [permuted_inputs_shape],
'mask': [],
}
transcript.append(inputs_permute_layer)
permuted_outputs_idx = config.new_tensor()
permuted_outputs_shape = [output_shape[p] for p in permutation]
inputs_permute_layer = {
'layer_type': 'Permute',
'params': permutation,
'inp_idxes': [config.gradient_tensor_idx(output_idx)],
'out_idxes': [permuted_outputs_idx],
'inp_shapes': [output_shape],
'out_shapes': [permuted_outputs_shape],
'mask': [],
}
transcript.append(inputs_permute_layer)
dw_idx, dw_shape = config.new_tensor(), weights_shape
dw_conv = {
'layer_type': 'Conv2D',
'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],
'inp_idxes': [permuted_inputs_idx, permuted_outputs_idx],
'out_idxes': [dw_idx],
'inp_shapes': [permuted_inputs_shape, permuted_outputs_shape],
'out_shapes': [dw_shape],
'mask': [],
}
transcript.append(dw_conv)
config.weights_update = dw_idx
permutation = [3, 1, 2, 0]
permutation_weights_idx = config.new_tensor()
permutation_weights_shape = [weights_shape[p] for p in permutation]
permute_weights = {
'layer_type': 'Permute',
'params': permutation,
'inp_idxes': [weights_idx],
'out_idxes': [permutation_weights_idx],
'inp_shapes': [weights_shape],
'out_shapes': [permutation_weights_shape],
'mask': [],
}
transcript.append(permute_weights)
rotated_weights_idx, rotated_weights_shape = config.new_tensor(), permutation_weights_shape
rotate_layer = {
'layer_type': 'Rotate',
'params': [1, 2],
'inp_idxes': [permutation_weights_idx],
'out_idxes': [rotated_weights_idx],
'inp_shapes': [permutation_weights_shape],
'out_shapes': [rotated_weights_shape],
'mask': [],
}
transcript.append(rotate_layer)
padded_gradients_idx, padded_gradients_shape = config.new_tensor(), output_shape
padded_gradients_shape[1] += (rotated_weights_shape[1] - 1) * 2
padded_gradients_shape[2] += (rotated_weights_shape[2] - 1) * 2
pad_layer = {
'layer_type': 'Pad',
'params': [
0, 0,
rotated_weights_shape[1] - 1, rotated_weights_shape[1] - 1,
rotated_weights_shape[2] - 1, rotated_weights_shape[2] - 1,
0, 0
],
'inp_idxes': [config.gradient_tensor_idx(output_idx)],
'out_idxes': [padded_gradients_idx],
'inp_shapes': [],
'out_shapes': [],
'mask': [],
}
transcript.append(pad_layer)
dx_idx, dx_shape = config.new_gradient_tensor(inputs_idx), inputs_shape
input_conv_layer = {
'layer_type': 'Conv2D',
'params': [CONV2D, VALID, NO_ACTIVATION, self.stride_h, self.stride_w],
'inp_idxes': [padded_gradients_idx, rotated_weights_idx],
'out_idxes': [dx_idx],
'inp_shapes': [padded_gradients_shape, rotated_weights_shape],
'out_shapes': [dx_shape],
'mask': [],
}
transcript.append(input_conv_layer)
permutation = [3, 1, 2, 0]
permuted_dw_idx = config.new_tensor()
permuted_dw_shape = [dw_shape[p] for p in permutation]
permute_dw = {
'layer_type': 'Permute',
'params': permutation,
'inp_idxes': [dw_idx],
'out_idxes': [permuted_dw_idx],
'inp_shapes': [dw_shape],
'out_shapes': [permuted_dw_shape],
'mask': [],
}
transcript.append(permute_dw)
updated_weights_idx, updated_weights_shape = config.new_tensor(), dw_shape
update_weights_layer = {
'layer_type': 'Update',
'params': [],
'inp_idxes': [weights_idx, permuted_dw_idx],
'out_idxes': [updated_weights_idx],
'inp_shapes': [weights_shape, permuted_dw_shape],
'out_shapes': [updated_weights_shape],
'mask': [],
}
class Softmax():
def __init__(self, layer):
return
def backward(self, layer, transcript, config):
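# Added comment: for softmax followed by cross-entropy, the gradient with respect to the logits
# collapses to (predicted probabilities - one-hot labels), so the backward pass is a single Sub
# layer against the label tensor.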
sub_layer = {
'layer_type': 'Sub',
'params': [],
'inp_idxes': [layer['out_idxes'][0], config.label_tensor_idx],
'out_idxes': [config.new_gradient_tensor(layer['inp_idxes'][0])],
'inp_shapes': [layer['out_shapes'][0], layer['out_shapes'][0]],
'out_shapes': [layer['out_shapes'][0]],
'mask': [],
}
transcript.append(sub_layer)
class AveragePool2D():
def __init__(self, layer):
return
def backward(self, layer, transcript, config):
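# Added comment: average pooling spreads the incoming gradient uniformly over its input window,
# so the backward pass broadcasts the output gradient back to the input shape and divides it by
# the window size (height * width).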
div_idx = config.new_tensor()
reshape_layer = {
'layer_type': 'Broadcast',
'params': [],
'inp_idxes': [config.gradient_tensor_idx(layer['out_idxes'][0])],
'out_idxes': [div_idx],
'inp_shapes': [layer['out_shapes'][0]],
'out_shapes': [layer['inp_shapes'][0]],
'mask': [],
}
transcript.append(reshape_layer)
out_idx = config.new_gradient_tensor(layer['inp_idxes'][0])
out_shape = layer['inp_shapes'][0]
div = {
'layer_type': 'Div',
'params': [layer['inp_shapes'][0][1] * layer['inp_shapes'][0][2]],
'inp_idxes': [div_idx],
'out_idxes': [out_idx],
'inp_shapes': [out_shape],
'out_shapes': [out_shape],
'mask': [],
}
transcript.append(div)