import argparse
import csv
import os
import pickle
from pathlib import Path
from typing import Any, Callable, Dict, List

# Helper imports; assumed to resolve against tests/scripts as in apache/tvm
from git_utils import GitHubRepo
from github_tag_teams import tags_from_title
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
PRS_QUERY = """
query ($owner: String!, $name: String!, $after: String, $pageSize: Int!) {
repository(owner: $owner, name: $name) {
defaultBranchRef {
name
target {
... on Commit {
oid
history(after: $after, first: $pageSize) {
pageInfo {
hasNextPage
endCursor
}
nodes {
oid
committedDate
associatedPullRequests(first: 1) {
nodes {
number
additions
changedFiles
deletions
author {
login
}
title
body
}
}
}
}
}
}
}
}
}
"""
def append_and_save(items, file):
if not file.exists():
data = []
else:
with open(file, "rb") as f:
data = pickle.load(f)
data += items
with open(file, "wb") as f:
pickle.dump(data, f)
def fetch_pr_data(args, cache):
github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)
if args.from_commit is None or args.to_commit is None:
print("--from-commit and --to-commit must be specified if --skip-query is not used")
exit(1)
i = 0
page_size = 80
cursor = f"{args.from_commit} {i}"
while True:
r = github.graphql(
query=PRS_QUERY,
variables={
"owner": user,
"name": repo,
"after": cursor,
"pageSize": page_size,
},
)
data = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]
if not data["pageInfo"]["hasNextPage"]:
break
cursor = data["pageInfo"]["endCursor"]
results = data["nodes"]
to_add = []
stop = False
for r in results:
if r["oid"] == |
args.to_commit:
print(f"Found {r['oid']}, stopping")
stop = True
break
else:
to_add.append(r)
oids = [r["oid"] for r in to_add]
print(oids)
append_and_save(to_add, cache)
if stop:
break
print(i)
i += page_size
def write_csv(
filename: str, data: List[Dict[str, Any]], filter: Callable[[Dict[str, Any]], bool]
) -> None:
with open(filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile, quotechar='"')
writer.writerow(
(
"category",
"description",
"date",
"number",
"author",
"tags",
"title",
"additions",
"deletions",
"changed files",
)
)
for item in data:
pr = item["associatedPullRequests"]["nodes"][0]
if not filter(pr):
continue
tags = tags_from_title(pr["title"])
actual_tags = []
for t in tags:
items = [x.strip() for x in t.split(",")]
actual_tags += items
tags = actual_tags
tags = [t.lower() for t in tags]
category = ""
if len(tags) == 1:
category = tags[0]
writer.writerow(
(
category,
"",
item["committedDate"],
f"https://github.com/{user}/{repo}/pull/{pr['number']}",  # PR URL (reconstructed; parsed back out by the notes script)
pr["author"]["login"],
", ".join(tags),
pr["title"],
pr["additions"],
pr["deletions"],
pr["changedFiles"],
)
)
if __name__ == "__main__":
help = "List out commits with attached PRs since a certain commit"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--from-commit", help |
="commit to start checking PRs from")
parser.add_argument("--to-commit", help="commit to stop checking PRs from")
parser.add_argument(
"--threshold", default=150, help="sum of additions + deletions to consider large"
)
parser.add_argument(
"--skip-query", action="store_true", help="don't query GitHub and instead use cache file"
)
args = parser.parse_args()
user = "apache"
repo = "tvm"
threshold = int(args.threshold)
cache = Path("out.pkl")
if not args.skip_query:
fetch_pr_data(args, cache)
with open(cache, "rb") as f:
data = pickle.load(f)
print(f"Found {len(data)} PRs")
write_csv(
filename="out-large.csv",
data=data,
filter=lambda pr: pr["additions"] + pr["deletions"] > threshold,
)
write_csv(
filename="out-small.csv",
data=data,
filter=lambda pr: pr["additions"] + pr["deletions"] <= threshold,
)
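# ---------------------------------------------------------------------------
# For reference, a minimal sketch of what tags_from_title is assumed to do.
# The real helper lives alongside this script; this hypothetical version only
# illustrates the "[Tag1][Tag2] Title" convention that write_csv relies on:
#
#   import re
#
#   def tags_from_title(title: str) -> list:
#       # "[TIR][Bugfix] Fix lowering" -> ["TIR", "Bugfix"]
#       return re.findall(r"\[([^\]]+)\]", title)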
import argparse
import subprocess
import sys
LINK_BASE = "https:
COMMIT_BASE = "https:
def sprint(*args):
print(*args, file=sys.stderr)
if __name__ == "__main__":
help = "List out RFCs since a commit"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--since-commit", required=True, help="last commit to include")
parser.add_argument("--rfcs-repo", required=True, help="path to checkout of apache/tvm-rfcs")
args = parser.parse_args()
user = "apache"
repo = "tvm"
rfc_repo = args.rfcs_repo
subprocess.run("git fetch origin main", cwd=rfc_repo, shell=True)
subprocess.run("git checkout main", cwd=rfc_repo, shell=True)
subprocess.run("git reset --hard origin/main", cwd=rfc_repo, shell=True)
r = subprocess.run(
f"git log {args.since_commit}..HEAD --format='%H %s'",
cwd=rfc_repo,
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
commits = r.stdout.strip().split("\n")
for commit in commits:
parts = commit.split()
commit = parts[0]
subject = " ".join(parts[1:])
r2 = subprocess.run(
f"git diff-tree --no-commit-id --name-only -r {commit}",
cwd=rfc_repo,
shell=True,
stdout=subprocess.PIPE,
encoding="utf-8",
)
files = r2.stdout.strip().split("\n")
rfc_file = None
for file in files:
if file.startswith("rfcs/") and file.endswith(".md"):
if rfc_file is not None:
sprint(f"error on {commit} {subject}")
rfc_file = file
if rfc_file is None:
sprint(f"error on {commit} {subject}")
continue
print(f" * [{subject}]({LINK_BASE + rfc_file}) ([`{commit[:7]}`]({COMMIT_BASE + commit}))") |
import argparse
import os
import pickle
from pathlib import Path
import csv
import sys
from collections import defaultdict
from typing import Callable, Dict, List, Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
def strip_header(title: str, header: str) -> str:
pos = title.lower().find(header.lower())
if pos == -1:
return title
return title[0:pos] + title[pos + len(header) :].strip()
def sprint(*args):
print(*args, file=sys.stderr)
if __name__ == "__main__":
help = "List out commits with attached PRs since a certain commit"
parser = argparse.ArgumentParser(description=help)
parser.add_argument("--notes-csv", required=True, help="csv file of categorized PRs in order")
args = parser.parse_args()
user = "apache"
repo = "tvm"
cache = Path("out.pkl")
if not cache.exists():
sprint("run gather_prs.py first to generate out.pkl")
exit(1)
with open(cache, "rb") as f:
data = pickle.load(f)
sprint(data[1])
reverse = {}
for item in data:
prs = item["associatedPullRequests"]["nodes"]
if len(prs) != 1:
continue
pr = prs[0]
reverse[pr["number"]] = pr
def pr_title(number, heading):
title = reverse[int(number)]["title"]
title = strip_header(title, heading)
return title
headings = defaultdict(lambda: defaultdict(list))
output = ""
sprint("Opening CSV")
with open(args.notes_csv) as f:
f.readline()
f.readline()
f.readline()
input_file = csv.DictReader(f)
i = 0
for row in input_file:
category = row["category"].strip()
subject = row["subject"].strip()
pr_number = row["url"].split("/")[-1]
if category == "" or subject == "":
sprint(f"Skipping {pr_number}")
continue
headings[category][subject].append(pr_number)
i += 1
def sorter(x):
if x == "Misc":
return 10
return 0
keys = list(headings.keys())
keys = list(sorted(keys))
keys = list(sorted(keys, key=sorter))
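# The double sort relies on Python's stable sort: the first pass orders the
# categories alphabetically, the second only moves "Misc" (key 10 vs. 0) to
# the end while keeping the rest in place, e.g.
#   sorted(sorted(["Runtime", "Misc", "Frontend"]), key=sorter)
#   == ["Frontend", "Runtime", "Misc"]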
for key in keys:
value = headings[key]
if key == "DO NOT INCLUDE":
continue
value = dict(value)
output += f"
misc = []
misc += value.get("n/a", [])
misc += value.get("Misc", [])
for pr_number in misc:
output += f" *
for subheading, pr_numbers in value.items():
if subheading == "DO NOT INCLUDE":
continue
if subheading == "n/a" or subheading == "Misc":
continue
else:
output += f" * {subheading} - " + ", ".join([f"
output += "\n"
print(output)


import urllib.request
import logging
LOGGER = None
BASE = "https:
URL_MAP = {
"http:
"http:
"http:
"http:
"http:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
"https:
} |
class TvmRequestHook(urllib.request.Request):
def __init__(self, url, *args, **kwargs):
LOGGER.info(f"Caught access to {url}")
if url in URL_MAP:
new_url = URL_MAP[url]
LOGGER.info(f"Mapped URL {url} to {new_url}")
else:
new_url = url
super().__init__(new_url, *args, **kwargs)
def init():
global LOGGER
urllib.request.Request = TvmRequestHook
LOGGER = logging.getLogger("tvm_request_hook")
LOGGER.setLevel(logging.DEBUG)
fh = logging.FileHandler("redirected_urls.log")
fh.setLevel(logging.DEBUG)
LOGGER.addHandler(fh)
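# Usage sketch: call init() before any code constructs a urllib request.
# Every urllib.request.Request is then built through TvmRequestHook, remapped
# when it appears in URL_MAP, and logged to redirected_urls.log. Assuming
# this file is importable as request_hook:
#
#   import request_hook
#   request_hook.init()
#   # urllib.request.urlopen(...) now goes through the hook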
import argparse
import shutil
import os
import logging
import sys
import multiprocessing
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts"))
from cmd_utils import Sh, init_log, REPO_ROOT
if __name__ == "__main__":
init_log()
parser = argparse.ArgumentParser(description="List pytest nodeids for a folder")
parser.add_argument("--sccache-bucket", required=False, help="sccache bucket name")
parser.add_argument("--build-dir", default="build", help="build folder")
parser.add_argument("--cmake-target", help="optional build target")
args = parser.parse_args()
env = {"VTA_HW_PATH": str(Path(os.getcwd()) / "3rdparty" / "vta-hw")}
sccache_exe = shutil.which("sccache")
use_sccache = sccache_exe is not None
build_dir = Path(os.getcwd()) / args.build_dir
build_dir = build_dir.relative_to(REPO_ROOT)
if use_sccache:
if args.sccache_bucket:
env["SCCACHE_BUCKET"] = args.sccache_bucket
logging.info(f"Using sccache bucket: {args.sccache_bucket}")
else:
logging.info(f"No sccache bucket set, using local cache")
env["CXX"] = "/opt/sccache/c++"
env["CC"] = "/opt/sccache/cc"
else:
if sccache_exe is None:
reason = "'sccache' executable not found"
else:
reason = "<unknown>"
logging.info(f"Not using sccache, reason: {reason}")
sh = Sh(env)
if use_sccache:
sh.run("sccache --start-server", check=False)
logging.info("===== sccache stats =====")
sh.run("sccache --show-stats")
executors = int(os.environ.get("CI_NUM_EXECUTORS", 1))
nproc = multiprocessing.cpu_count()
available_cpus = nproc // executors
num_cpus = max(available_cpus, 1)
sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ..", cwd=build_dir)
target = ""
if args.cmake_target:
target = args.cmake_target
verbose = os.environ.get("VERBOSE", "true").lower() in {"1", "true", "yes"}
ninja_args = [target, f"-j{num_cpus}"]
if verbose:
ninja_args.append("-v")
sh.run(f"cmake --build . -- " + " ".join(ninja_args), cwd=build_dir)
if use_sccache:
logging.info("===== sccache |
stats =====")
sh.run("sccache --show-stats") |
"""
This is the global script that set the version information of TVM.
This script runs and update all the locations that related to versions
List of affected files:
- tvm-root/python/tvm/_ffi/libinfo.py
- tvm-root/include/tvm/runtime/c_runtime_api.h
- tvm-root/conda/recipe/meta.yaml
- tvm-root/web/package.json
""" |
import os
import re
import argparse
import logging
import subprocess
__version__ = "0.11.dev0"
PROJ_ROOT = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
def py_str(cstr):
return cstr.decode("utf-8")
def git_describe_version():
"""Get PEP-440 compatible public and local version using git describe.
Returns
-------
pub_ver: str
Public version.
local_ver: str
Local version (with additional label appended to pub_ver).
Notes
-----
- We follow PEP 440's convention of public version
and local versions.
- Only tags conforming to vMAJOR.MINOR.REV (e.g. "v0.7.0")
are considered in order to generate the version string.
See the use of `--match` in the `git` command below.
Here are some examples:
- pub_ver = '0.7.0', local_ver = '0.7.0':
We are at the 0.7.0 release.
- pub_ver = '0.8.dev94', local_ver = '0.8.dev94+g0d07a329e':
We are at the 0.8 development cycle.
The current source contains 94 additional commits
after the most recent tag (v0.7.0);
the git short hash of the current commit is 0d07a329e.
"""
cmd = [
"git",
"describe",
"--tags",
"--match",
"v[0-9]*.[0-9]*.[0-9]*",
"--match",
"v[0-9]*.[0-9]*.dev[0-9]*",
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=PROJ_ROOT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = py_str(out)
if msg.find("not a git repository") != -1:
return __version__, __version__
logging.warning("git describe: %s, use %s", msg, __version__)
return __version__, __version__
describe = py_str(out).strip()
arr_info = describe.split("-")
if arr_info[0].startswith("v"):
arr_info[0] = arr_info[0][1:]
if len(arr_info) == 1:
return arr_info[0], arr_info[0]
if len(arr_info) != 3:
logging.warning("Invalid output from git describe %s", describe)
return __version__, __version__
dev_pos = arr_info[0].find(".dev")
if dev_pos != -1:
dev_version = arr_info[0][: arr_info[0].find(".dev")]
else:
dev_version = arr_info[0]
pub_ver = "%s.dev%s" % (dev_version, arr_info[1])
local_ver = "%s+%s" % (pub_ver, arr_info[2])
return pub_ver, local_ver
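# Worked examples of the parsing above:
#   git describe -> "v0.7.0"                  => ("0.7.0", "0.7.0")
#   git describe -> "v0.8.dev0-94-g0d07a329e" => ("0.8.dev94",
#                                                 "0.8.dev94+g0d07a329e")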
def update(file_name, pattern, repl, dry_run=False):
update = []
hit_counter = 0
need_update = False
with open(file_name) as file:
for l in file:
result = re.findall(pattern, l)
if result:
assert len(result) == 1
hit_counter += 1
if result[0] != repl:
l = re.sub(pattern, repl, l)
need_update = True
print("%s: %s -> %s" % (file_name, result[0], repl))
else:
print("%s: version is already %s" % (file_name, repl))
update.append(l)
if hit_counter != 1:
raise RuntimeError("Cannot find version in %s" % file_name)
if need_update and not dry_run:
with open(file_name, "w") as output_file:
for l in update:
output_file.write(l)
def sync_version(pub_ver, local_ver, dry_run):
"""Synchronize version."""
update(
os.path.join(PROJ_ROOT, "python", "tvm", "_ffi", "libinfo.py"),
r"(?<=__version__ = \")[.0-9a-z\+]+",
local_ver,
dry_run,
)
update(
os.path.join(PROJ_ROOT, "include", "tvm", "runtime", "c_runtime_api.h"),
r'(?<=TVM_VERSION ")[.0-9a-z\+]+',
pub_ver,
dry_run,
)
update(
os.path.join(PROJ_ROOT, "conda", "recipe", "meta.yaml"),
r"(?<=version = ')[.0-9a-z\+]+",
pub_ver,
dry_run,
)
dev_pos = pub_ver.find(".dev")
npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :])
update(
os.path.join(PROJ_ROOT, "web", "package.json"),
r'(?<="version": ")[.0-9a-z\-\+]+',
npm_ver,
dry_run,
)
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Detect and synchronize version.")
parser.add_argument(
"--print-version",
action="store_true",
help="Print version to the command line. No changes is applied to files.",
)
parser.add_argument(
"--git-describe",
action="store_true",
help="Use git describe to generate development version.",
)
parser.add_argument("--dry-run", action="store_true")
opt = parser.parse_args()
pub_ver, local_ver = __version__, __version__
if opt.git_describe:
pub_ver, local_ver = git_describe_version()
if opt.print_version:
print(local_ver)
else:
sync_version(pub_ver, local_ver, opt.dry_run)
if __name__ == "__main__":
main()
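# Typical invocations, per the flags defined in main():
#
#   python version.py --git-describe --print-version   # print the version only
#   python version.py --git-describe --dry-run         # preview file edits
#   python version.py                                  # sync files to __version__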
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Package is a TVM backend extension to support VTA hardware.
Besides the compiler toolchain, it also includes utility functions to
configure the hardware environment and access remote device through RPC.
"""
import sys
import tvm._ffi.base
from .autotvm import module_loader
from .bitstream import get_bitstream_path, download_bitstream
from .environment import get_env, Environment
from .rpc_client import reconfig_runtime, program_fpga
__version__ = "0.1.0"
# do not from tvm import topi when running vta.exec.rpc_server
# in lib tvm runtime only mode
if not tvm._ffi.base._RUNTIME_ONLY:
from . import top
from .build_module import build_config, lower, build
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines AutoTVM components used with VTA."""
from tvm.autotvm.measure import default_module_loader
from . import rpc_client
def module_loader(bitstream=None):
"""Construct a ModuleLoader implementation specialized for VTA.
Parameters
----------
bitstream : Optional[str]
Path to the bitstream to write prior to uploading code.
Returns
-------
ModuleLoader :
The ModuleLoader instance.
"""
def reprogram_fpga(remote, _build_result):
"""default_module_loader callback which reprograms the FPGA.
Parameters
----------
remote : tvm.rpc.RPCSession
RPC session established to the remote device.
_build_result : tvm.autotvm.measure.measure_methods.BuildResult
Artifact from the build phase, unused here.
"""
rpc_client.program_fpga(remote, bitstream)
rpc_client.reconfig_runtime(remote)
return default_module_loader(reprogram_fpga)
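# Usage sketch (hedged): hand the loader to AutoTVM's RPC-based measurement
# so each remote run reprograms the FPGA first; the module_loader parameter
# name follows tvm.autotvm.measure and may differ across TVM versions:
#
#   import vta
#   loader = vta.module_loader(bitstream="/path/to/my.bit")  # path illustrative
#   # e.g. autotvm.RPCRunner(key, host, port, module_loader=loader)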
"""VTA specific bitstream management library."""
from __future__ |
import absolute_ |
import as _abs |
import os |
import sys
from tvm.contrib.download |
import download
from .environment |
import get_env
if sys.version_info >= (3,): |
import urllib.error as urllib2
else: |
import urllib2
BITSTREAM_URL = "https:
def get_bitstream_path():
"""Returns the path to the cached bitstream corresponding to the current config
Returns
-------
bit_path: str
Corresponding to the filepath of the bitstream
"""
env = get_env()
cache_dir = os.getenv("VTA_CACHE_PATH", os.path.join(os.getenv("HOME"), ".vta_cache/"))
cache_dir = os.path.join(cache_dir, env.TARGET)
cache_dir = os.path.join(cache_dir, env.HW_VER.replace(".", "_"))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
bit_path = os.path.join(cache_dir, env.BITSTREAM) + ".bit"
return bit_path
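# Example (illustrative values): with VTA_CACHE_PATH unset, TARGET "pynq",
# and HW_VER "0.0.2", the returned path is
#   ~/.vta_cache/pynq/0_0_2/<env.BITSTREAM>.bit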
def download_bitstream():
"""Downloads a cached bitstream corresponding to the current config"""
env = get_env()
success = False
bit = get_bitstream_path()
url = os.path.join(BITSTREAM_URL, env.TARGET)
url = os.path.join(url, env.HW_VER)
url = os.path.join(url, env.BITSTREAM + ".bit")
try:
download(url, bit)
except urllib2.HTTPError as err:
if err.code == 404:
raise RuntimeError(
"{} is not available. It appears that this configuration \
bitstream has not been cached. Please compile your own bitstream (see hardware \
compilation guide to get Xilinx toolchains setup) and add it to your \
$VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
settings. You can see the list of available bitstreams under {}".format(
url, BITSTREAM_URL
)
)
raise RuntimeError(
"Something went wrong when trying to access {}. Check your \
internet connection or proxy settings.".format(
url
)
)
return success
"""VTA specific buildin for runtime.""" |
import tvm
from tvm.ir |
import register_intrin_lowering
from . |
import transform
from .environment |
import get_env, Environment
def EarlyRewrite():
"""Try to do storage rewrite in early pass."""
def _transform(mod, ctx):
try:
return tvm.tir.transform.StorageRewrite()(mod)
except tvm.error.TVMError:
return mod
return tvm.transform.module_pass(_transform, opt_level=0, name="tir.vta.EarlyRewrite")
def build_config(debug_flag=0, **kwargs):
"""Build a build config for VTA.
Parameters
----------
debug_flag : int
The debug flag to be passed.
kwargs : dict
Additional configurations.
Returns
-------
build_config: tvm.transform.PassContext
The build config that can be used in TVM.
Example
--------
.. code-block:: python
with vta.build_config():
vta_module = tvm.build(s, ...)
"""
env = get_env()
@tvm.tir.transform.prim_func_pass(opt_level=0)
def add_debug(f, *_):
debug = tvm.tir.call_extern("int32", "VTASetDebugMode", env.dev.command_handle, debug_flag)
return f.with_body(tvm.tir.stmt_seq(debug, f.body))
pass_list = [
(0, transform.InjectConv2DTransposeSkip()),
(1, transform.InjectDMAIntrin()),
(1, transform.InjectSkipCopy()),
(1, transform.AnnotateALUCoProcScope()),
(1, tvm.tir.transform.LiftAttrScope("coproc_uop_scope")),
(1, transform.LiftAllocToScopeBegin()),
(1, tvm.tir.transform.LiftAttrScope("coproc_scope")),
(1, transform.InjectCoProcSync()),
(1, EarlyRewrite()),
]
if debug_flag:
pass_list.append((1, add_debug))
pass_list.append((2, transform.InjectALUIntrin()))
pass_list.append((3, tvm.tir.transform.LowerDeviceStorageAccessInfo()))
pass_list.append((3, transform.FoldUopLoop()))
pass_list.append((3, transform.CPUAccessRewrite()))
config = {"tir.add_lower_pass": pass_list}
if kwargs.get("config"):
config.update(kwargs[config])
del kwargs["config"]
return tvm.transform.PassContext(config=config, |
**kwargs)
def lower(*args, **kwargs):
"""Thin wrapper of tvm.lower
This wrapper automatically applies VTA's build_config
if there is no user specified build_config in context.
See Also
--------
tvm.lower : The original TVM's lower function
"""
pass_ctx = tvm.transform.PassContext.current()
if not pass_ctx.config.get("add_lower_pass"):
with build_config():
return tvm.lower(*args, **kwargs)
return tvm.lower(*args, **kwargs)
def build(*args, **kwargs):
"""Thin wrapper of tvm.build
This wrapper automatically applies VTA's build_config
if there is no user specified build_config in context.
See Also
--------
tvm.build : The original TVM's build function
"""
pass_ctx = tvm.transform.PassContext.current()
if not pass_ctx.config.get("tir.add_lower_pass"):
with build_config():
return tvm.build(*args, **kwargs)
return tvm.build(*args, **kwargs)
tvm.ir.register_op_attr("tir.vta.coproc_sync", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_pop", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TGlobalSymbol", "VTAUopPush")
tvm.ir.register_op_attr("tir.vta.command_handle", "TGlobalSymbol", "VTATLSCommandHandle")
tvm.ir.register_op_attr("tir.vta.command_handle", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
@tvm.register_func("tvm.info.mem.%s" % Environment.inp_scope)
def mem_info_inp_buffer():
spec = get_env()
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=spec.INP_ELEM_BITS,
max_simd_bits=spec.INP_ELEM_BITS,
max_num_bits=spec.INP_BUFF_SIZE * 8,
head_address=None,
)
@tvm.register_func("tvm.info.mem.%s" % Environment.wgt_scope)
def mem_info_wgt_buffer():
spec = get_env()
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=spec.WGT_ELEM_BITS,
max_simd_bits=spec.WGT_ELEM_BITS,
max_num_bits=spec.WGT_BUFF_SIZE * 8,
head_address=None,
)
@tvm.register_func("tvm.info.mem.%s" % Environment.acc_scope)
def mem_info_acc_buffer():
spec = get_env()
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=spec.ACC_ELEM_BITS,
max_simd_bits=spec.ACC_ELEM_BITS,
max_num_bits=spec.ACC_BUFF_SIZE * 8,
head_address=None,
)
@register_intrin_lowering("tir.vta.coproc_sync", "default")
def coproc_sync(op):
_ = op
return tvm.tir.call_extern(
"int32",
"VTASynchronize",
get_env().dev.command_handle,
tvm.runtime.const(1 << 31, dtype="uint32"),
)
@register_intrin_lowering("tir.vta.coproc_dep_push", "default")
def coproc_dep_push(op):
return tvm.tir.call_extern(
"int32", "VTADepPush", get_env().dev.command_handle, op.args[0], op.args[1]
)
@register_intrin_lowering("tir.vta.coproc_dep_pop", "default")
def coproc_dep_pop(op):
return tvm.tir.call_extern(
"int32", "VTADepPop", get_env().dev.command_handle, op.args[0], op.args[1]
)


"""Configurable VTA Hardware Environment scope."""
from __future__ import absolute_import as _abs

import os
import json
import copy

import tvm
from tvm import te

from . import intrin
def get_vta_hw_path():
"""Get the VTA HW path."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
vta_hw_default = os.path.abspath(os.path.join(curr_path, "../../../3rdparty/vta-hw"))
VTA_HW_PATH = os.getenv("VTA_HW_PATH", vta_hw_default)
return os.path.abspath(VTA_HW_PATH)
def pkg_config(cfg):
"""Returns PkgConfig pkg config object."""
pkg_config_py = os.path.join(get_vta_hw_path(), "config/pkg_config.py")
libpkg = {"__file__": pkg_config_py}
exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
PkgConfig = libpkg["PkgConfig"]
return PkgConfig(cfg)
class DevContext(object):
"""Internal development context
This contains all the non-user-facing compiler
internal context held by the Environment.
Parameters
----------
env : Environment
The environment hosting the DevContext
Note
----
This class is introduced so we have a clear separation
of developer-related and user-facing attributes.
"""
MEM_ID_UOP = 0
MEM_ID_WGT = 1
MEM_ID_INP = 2
MEM_ID_ACC = 3
MEM_ID_OUT = 4
MEM_ID_ACC_8BIT = 5
ALU_OPCODE_MIN = 0
ALU_OPCODE_MAX = 1
ALU_OPCODE_ADD = 2
ALU_OPCODE_SHR = 3
ALU_OPCODE_MUL = 4
QID_LOAD_INP = 1
QID_LOAD_WGT = 1
QID_LOAD_OUT = 2
QID_STORE_OUT = 3
QID_COMPUTE = 2
def __init__(self, env):
self.vta_axis = te.thread_axis("vta")
self.vta_push_uop = tvm.tir.StringImm("VTAPushGEMMOp")
ctx = tvm.tir.call_intrin("handle", "tir.vta.command_handle")
self.command_handle = tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx])
self.DEBUG_NO_SYNC = False
env._dev_ctx = self
self.gemm = intrin.gemm(env, env.mock_mode)
def get_task_qid(self, qid):
"""Get transformed queue index."""
return 1 if self.DEBUG_NO_SYNC else qid
class Environment(object):
"""Hardware configuration object.
This object contains all the information
needed for compiling to a specific VTA backend.
Parameters
----------
cfg : dict of str to value.
The configuration parameters.
Example
--------
.. code-block:: python
new_cfg = json.load(open("new_cfg.json"))
with vta.Environment(new_cfg):
env = vta.get_env()
"""
current = None
MAX_XFER = 1 << 22
DEBUG_DUMP_INSN = 1 << 1
DEBUG_DUMP_UOP = 1 << 2
DEBUG_SKIP_READ_BARRIER = 1 << 3
DEBUG_SKIP_WRITE_BARRIER = 1 << 4
inp_scope = "local.inp_buffer"
wgt_scope = "local.wgt_buffer"
acc_scope = "local.acc_buffer"
def __init__(self, cfg):
self.pkg = pkg_config(cfg)
self.__dict__.update(self.pkg.cfg_dict)
self.INP_WIDTH = 1 << self.LOG_INP_WIDTH
self.WGT_WIDTH = 1 << self.LOG_WGT_WIDTH
self.ACC_WIDTH = 1 << self.LOG_ACC_WIDTH
self.OUT_WIDTH = 1 << self.LOG_OUT_WIDTH
self.BATCH = 1 << self.LOG_BATCH
self.BLOCK_IN = 1 << self.LOG_BLOCK_IN
self.BLOCK_OUT = 1 << self.LOG_BLOCK_OUT
self.UOP_BUFF_SIZE = 1 << self.LOG_UOP_BUFF_SIZE
self.INP_BUFF_SIZE = 1 << self.LOG_INP_BUFF_SIZE
self.WGT_BUFF_SIZE = 1 << self.LOG_WGT_BUFF_SIZE
self.ACC_BUFF_SIZE = 1 << self.LOG_ACC_BUFF_SIZE
self.OUT_BUFF_SIZE = 1 << self.LOG_OUT_BUFF_SIZE
self.INP_ELEM_BITS = self.BATCH * self.BLOCK_IN * self.INP_WIDTH
self.WGT_ELEM_BITS = self.BLOCK_OUT * self.BLOCK_IN * self.WGT_WIDTH
self.ACC_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.ACC_WIDTH
self.OUT_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.OUT_WIDTH
self.INP_ELEM_BYTES = self.INP_ELEM_BITS // 8
self.WGT_ELEM_BYTES = self.WGT_ELEM_BITS // 8
self.ACC_ELEM_BYTES = self.ACC_ELEM_BITS // 8
self.OUT_ELEM_BYTES = self.OUT_ELEM_BITS // 8
self.acc_dtype = "int%d" % self.ACC_WIDTH
self.inp_dtype = "int%d" % self.INP_WIDTH
self.wgt_dtype = "int%d" % self.WGT_WIDTH
self.out_dtype = "int%d" % self.OUT_WIDTH
self.BITSTREAM = self.pkg.bitstream
self.MODEL = self.TARGET + "_" + self.BITSTREAM
self.mock_mode = False
self._mock_env = None
self._dev_ctx = None
self._last_env = None
def __enter__(self):
self._last_env = Environment.current
Environment.current = self
return self
def __exit__(self, ptype, value, trace):
Environment.current = self._last_env
@property
def cfg_dict(self):
return self.pkg.cfg_dict
@property
def dev(self):
"""Developer context"""
if self._dev_ctx is None:
self._dev_ctx = DevContext(self)
return self._dev_ctx
@property
def mock(self):
"""A mock version of the Environment
The ALU, dma_copy and intrinsics will be
mocked to be nop.
"""
if self.mock_mode:
return self
if self._mock_env is None:
self._mock_env = copy.copy(self)
self._mock_env._dev_ctx = None
self._mock_env.mock_mode = True
return self._mock_env
@property
def dma_copy(self):
"""DMA copy pragma"""
return "dma_copy" if not self.mock_mode else "skip_dma_copy"
@property
def alu(self):
"""ALU pragma"""
return "alu" if not self.mock_mode else "skip_alu"
@property
def gemm(self):
"""GEMM intrinsic"""
return self.dev.gemm
@property
def target(self):
return tvm.target.vta(model=self.MODEL)
@property
def target_host(self):
"""The target host"""
if self.TARGET in ["pynq", "de10nano"]:
return "llvm -mtriple=armv7-none-linux-gnueabihf"
if self.TARGET == "ultra96":
return "llvm -mtriple=aarch64-linux-gnu" |
if self.TARGET in ["sim", "tsim", "intelfocl"]:
return "llvm"
raise ValueError("Unknown target %s" % self.TARGET)
@property
def target_vta_cpu(self):
return tvm.target.arm_cpu(model=self.TARGET)
def get_env():
"""Get the current VTA Environment.
Returns
-------
env : Environment
The current environment.
"""
return Environment.current
def _init_env():
"""Initialize the default global env"""
config_path = os.path.join(get_vta_hw_path(), "config/vta_config.json")
if not os.path.exists(config_path):
raise RuntimeError("Cannot find config in %s" % str(config_path))
cfg = json.load(open(config_path))
return Environment(cfg)
Environment.current = _init_env()
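# Usage sketch, mirroring the Environment docstring above (path illustrative):
#
#   import json
#   import vta
#
#   with vta.Environment(json.load(open("new_cfg.json"))):
#       env = vta.get_env()
#       print(env.BATCH, env.BLOCK_IN, env.BLOCK_OUT, env.inp_dtype)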
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Command line utils."""
"""VTA customized TVM RPC Server
Provides additional runtime function and library loading.
"""
from __future__ import absolute_import

import logging
import argparse
import os
import ctypes
import json

import tvm
from tvm import rpc
from tvm.contrib import cc
from vta import program_bitstream

from ..environment import get_env, pkg_config
from ..libinfo import find_libvta
def server_start():
"""VTA RPC server extension."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
proj_root = os.path.abspath(os.path.join(curr_path, "../../../../"))
dll_path = find_libvta("libvta")[0]
cfg_path = os.path.abspath(os.path.join(proj_root, "3rdparty/vta-hw/config/vta_config.json"))
runtime_dll = []
_load_module = tvm.get_global_func("tvm.rpc.server.load_module")
def load_vta_dll():
"""Try to load vta dll"""
if not runtime_dll:
runtime_dll.append(ctypes.CDLL(dll_path, ctypes.RTLD_GLOBAL))
logging.info("Loading VTA library: %s", dll_path)
return runtime_dll[0]
@tvm.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
load_vta_dll()
return _load_module(file_name)
@tvm.register_func("device_api.ext_dev")
def ext_dev_callback():
load_vta_dll()
return tvm.get_global_func("device_api.ext_dev")()
@tvm.register_func("tvm.contrib.vta.init", override=True)
def program_fpga(file_name):
env = get_env()
if env.TARGET == "pynq":
from pynq import xlnk
xlnk.Xlnk().xlnk_reset()
elif env.TARGET == "de10nano":
load_vta_dll()
path = tvm.get_global_func("tvm.rpc.server.workpath")(file_name)
program_bitstream.bitstream_program(env.TARGET, path)
logging.info("Program FPGA with %s ", file_name)
@tvm.register_func("tvm.rpc.server.shutdown", override=True)
def server_shutdown():
if runtime_dll:
runtime_dll[0].VTARuntimeShutdown()
runtime_dll.pop()
@tvm.register_func("tvm.contrib.vta.reconfig_runtime", override=True)
def reconfig_runtime(cfg_json):
"""Rebuild and reload runtime with new configuration.
Parameters
----------
cfg_json : str
JSON string used for configurations.
"""
env = get_env()
if runtime_dll:
if env.TARGET == "de10nano":
print("Please reconfigure the runtime AFTER programming a bitstream.")
raise RuntimeError("Can only reconfig in the beginning of session...")
cfg = json.loads(cfg_json)
cfg["TARGET"] = env.TARGET
pkg = pkg_config(cfg)
if os.path.isfile(cfg_path):
old_cfg = json.loads(open(cfg_path, "r").read())
if pkg.same_config(old_cfg):
logging.info("Skip reconfig_runtime due to same config.")
return
cflags = ["-O2", "-std=c++17"]
cflags += pkg.cflags
ldflags = pkg.ldflags
lib_name = dll_path
source = pkg.lib_source
logging.info(
"Rebuild runtime:\n output=%s,\n cflags=%s,\n source=%s,\n ldflags=%s",
dll_path,
"\n\t".join(cflags),
"\n\t".join(source),
"\n\t".join(ldflags),
)
cc.create_shared(lib_name, source, cflags + ldflags)
with open(cfg_path, "w") as outputfile:
outputfile.write(pkg.cfg_json)
def main():
"""Main funciton"""
parser = argparse.ArgumentPar |
ser()
parser.add_argument(
"--host", type=str, default="0.0.0.0", help="The host IP address the server binds to"
)
parser.add_argument("--port", type=int, default=9091, help="The port of the RPC")
parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
parser.add_argument(
"--key", type=str, default="", help="RPC key used to identify the connection type."
)
parser.add_argument("--tracker", type=str, default="", help="Report to RPC tracker")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.tracker:
url, port = args.tracker.split(":")
port = int(port)
tracker_addr = (url, port)
if not args.key:
raise RuntimeError("Need key to present type of resource when tracker is available")
else:
tracker_addr = None
def server_init_callback():
    import tvm
    import vta.exec.rpc_server
tvm.register_func("tvm.rpc.server.start", vta.exec.rpc_server.server_start, override=True)
server = rpc.Server(
args.host,
args.port,
args.port_end,
key=args.key,
tracker_addr=tracker_addr,
server_init_callback=server_init_callback,
)
server.proc.join()
if __name__ == "__main__":
main()
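# Examples: run a standalone VTA RPC server, or register with an RPC tracker
# (addresses are illustrative):
#
#   python -m vta.exec.rpc_server --port 9091
#   python -m vta.exec.rpc_server --tracker 192.168.1.10:9190 --key pynq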
"""VTA related intrinsics"""
from __future__ |
import absolute_ |
import as _abs |
import tvm
from tvm |
import te
def gemm(env, mock=False):
"""Matrix-matrix multiply intrinsic
Parameters
----------
env : Environment
The Environment
mock : bool
    Whether to create a mock version.
"""
wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
assert inp_lanes == env.BATCH * env.BLOCK_IN
inp_shape = (env.BATCH, env.BLOCK_IN)
assert inp_shape[0] * inp_shape[1] == inp_lanes
out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
assert out_lanes == env.BATCH * env.BLOCK_OUT
out_shape = (env.BATCH, env.BLOCK_OUT)
assert out_shape[0] * out_shape[1] == out_lanes
wgt = te.placeholder(
(wgt_shape[0], wgt_shape[1]), dtype="int%d" % env.WGT_WIDTH, name=env.wgt_scope
)
inp = te.placeholder(
(inp_shape[0], inp_shape[1]), dtype="int%d" % env.INP_WIDTH, name=env.inp_scope
)
k = te.reduce_axis((0, wgt_shape[1]), name="k")
out_dtype = "int%d" % env.ACC_WIDTH
out = te.compute(
(out_shape[0], out_shape[1]),
lambda i, j: te.sum(inp[i, k].astype(out_dtype) * wgt[j, k].astype(out_dtype), axis=[k]),
name="out",
)
wgt_layout = tvm.tir.decl_buffer(
wgt.shape,
wgt.dtype,
env.wgt_scope,
scope=env.wgt_scope,
offset_factor=wgt_lanes,
data_alignment=wgt_lanes,
)
inp_layout = tvm.tir.decl_buffer(
inp.shape,
inp.dtype,
env.inp_scope,
scope=env.inp_scope,
offset_factor=inp_lanes,
data_alignment=inp_lanes,
)
out_layout = tvm.tir.decl_buffer(
out.shape,
out.dtype,
env.acc_scope,
scope=env.acc_scope,
offset_factor=out_lanes,
data_alignment=out_lanes,
)
def intrin_func(ins, outs):
"""Matrix-matrix multiply intrinsic function"""
dinp, dwgt = ins
dout = outs[0]
def instr(index):
"""Generate matrix-matrix multiply VTA instruction"""
irb = tvm.tir.ir_builder.create()
dev = env.dev
irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
if index in (0, 2):
irb.emit(
tvm.tir.call_intrin(
"int32",
"tir.vta.uop_push",
0,
0,
dout.access_ptr("rw", "int32"),
dinp.access_ptr("r", "int32"),
dwgt.access_ptr("r", "int32"),
0,
0,
0,
)
)
else:
irb.emit(
tvm.tir.call_intrin(
"int32",
"tir.vta.uop_push",
0,
1,
dout.access_ptr("rw", "int32"),
0,
0,
0,
0,
0,
)
)
return irb.get()
nop = tvm.tir.Evaluate(0)
if mock:
return (nop, nop, nop)
return (instr(0), instr(1), instr(2))
return te.decl_tensor_intrin(
out.op, intrin_func, name="GEMM", binds={inp: inp_layout, wgt: wgt_layout, out: out_layout}
)
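# Usage sketch (hedged): the returned tensor intrinsic is applied to a
# matching (BATCH, BLOCK_OUT) sub-computation via tensorize; the axis to
# tensorize over is workload-specific:
#
#   gemm_intrin = gemm(env)          # or env.gemm via the Environment property
#   s[res].tensorize(batch_inner_axis, gemm_intrin)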
"""Library information."""
from __future__ |
import absolute_ |
import |
import sys |
import os
from .environment |
import get_vta_hw_path
def _get_lib_name(lib_name):
"""Get lib name with extension
Returns
-------
lib_name_ext : str
Name of VTA shared library with extension
Parameters
------------
lib_name : str
Name of VTA shared library
"""
if sys.platform.startswith("win32"):
return lib_name + ".dll"
if sys.platform.startswith("darwin"):
return lib_name + ".dylib"
return lib_name + ".so"
def find_libvta(lib_vta, optional=False):
"""Find VTA Chisel-based library
Returns
-------
lib_found : str
Library path
Parameters
------------
lib_vta : str
Name of VTA shared library
optional : bool
    If True, do not raise an error when the library cannot be found.
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
tvm_library_path = os.environ.get("TVM_LIBRARY_PATH", None)
if tvm_library_path is None:
tvm_library_path = os.path.join(
curr_path,
os.pardir,
os.pardir,
os.pardir,
"build",
)
lib_search = [tvm_library_path, os.path.join(get_vta_hw_path(), "build")]
lib_name = _get_lib_name(lib_vta)
lib_path = [os.path.join(x, lib_name) for x in lib_search]
lib_found = [x for x in lib_path if os.path.exists(x)]
if not lib_found and not optional:
raise RuntimeError(
"Cannot find the files.\n" + "List of candidates:\n" + str("\n".join(lib_path))
)
return lib_found
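# Usage sketch: callers typically take the first hit, as the RPC server does;
# "libvta_fsim" is the simulator library name used elsewhere in VTA:
#
#   dll_path = find_libvta("libvta")[0]               # hardware runtime
#   sim = find_libvta("libvta_fsim", optional=True)   # may be an empty list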
"""VTA specific bitstream program library.""" |
import os |
import argparse
def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument("target", type=str, default="", help="target")
parser.add_argument("bitstream", type=str, default="", help="bitstream path")
args = parser.parse_args()
if args.target not in ("pynq", "ultra96", "de10nano", "sim", "tsim"):
raise RuntimeError("Unknown target {}".format(args.target))
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
path_list = [
os.path.join(curr_path, "/{}".format(args.bitstream)),
os.path.join("./", "{}".format(args.bitstream)),
]
ok_path_list = [p for p in path_list if os.path.exists(p)]
if not ok_path_list:
raise RuntimeError("Cannot find bitstream file in %s" % str(path_list))
bitstream_program(args.target, args.bitstream)
def pynq_bitstream_program(bitstream_path):
from pynq