#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
import re
from typing import Optional
try:
# Internal
from .embedding_common_code_generator import *
except ImportError:
# OSS
from embedding_common_code_generator import *
def generate_backward_embedding_cuda(
template_filepath: str,
optimizer: str,
filename_format: str,
kwargs: Dict[str, Any],
) -> None:
if not kwargs.get("has_gpu_support"):
return
template = env.get_template(template_filepath)
vbe_options = [True, False] if kwargs.get("has_vbe_support") else [False]
for weighted in [True, False]:
for nobag in [True, False]:
for vbe in vbe_options:
if (not nobag or (not weighted and not vbe)) and (
not kwargs.get("dense") or not vbe
):
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }{ '_vbe' if vbe else '' }"
filename = filename_format.format(optimizer, wdesc)
write(
filename,
template.render(
weighted=weighted,
nobag=nobag,
vbe=vbe,
is_index_select=False,
**kwargs,
),
)
print(f"[Backward Split] [{optimizer}]: {filename}")
def generate(**kwargs: Any) -> None:
optimizer = kwargs.get("optimizer")
gen_args = kwargs["args"]
#
# Generate GPU variants of the operators
#
kwargs["args"] = gen_args["cuda"]
# Generate the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_cuda.cu",
kwargs,
)
# Generate the cta_per_row kernels for the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_kernel_cta_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_kernel_cta.cu",
kwargs,
)
# Generate the warp_per_row kernels for the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_kernel_warp_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_kernel_warp.cu",
kwargs,
)
# Generate optimizer kernel
template = env.get_template("embedding_optimizer_split_device_kernel_template.cuh")
filename = f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh"
write(filename, template.render(**kwargs))
# Generate the backward splits (non-dense)
# We generate only the API to preserve backward compatibility if
# has_gpu_support=True
if not kwargs.get("dense"):
template = env.get_template("embedding_backward_split_host_template.cpp")
filename = f"gen_embedding_backward_split_{optimizer}.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
if kwargs.get("has_cpu_support") or kwargs.get("has_gpu_support"):
# Generates Python invoker for CUDA + CPU
template = env.get_template(
"split_embedding_codegen_lookup_invoker.template"
)
filename = f"lookup_{optimizer}.py"
write(filename, template.render(is_fbcode=args.is_fbcode, **kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
#
# Generate CPU variants of the operators
#
kwargs["args"] = gen_args["cpu"]
# Generate the backward splits
if kwargs.get("has_cpu_support"):
is_approx = "approx" in optimizer
template = (
env.get_template("embedding_backward_split_cpu_approx_template.cpp")
if is_approx
else env.get_template("embedding_backward_split_cpu_template.cpp")
)
filename = f"gen_embedding_backward_{optimizer}_split_cpu.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
# Generate the backward splits (non-dense)
if not kwargs.get("dense"):
template = env.get_template("embedding_backward_split_host_cpu_template.cpp")
filename = f"gen_embedding_backward_split_{optimizer}_cpu.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
# Format the calls that construct PackedTensorAccessors
def make_pta_acc_format(pta_str_list: List[str], func_name: str) -> List[str]:
new_str_list = []
for pta_str in pta_str_list:
if "packed_accessor" in pta_str:
match = re.search(
r"([a-zA-z0-9_]*)[.]packed_accessor([3|6][2|4])<(.*)>\(\)", pta_str
)
assert match is not None and len(match.groups()) == 3
tensor, acc_nbits, args = match.groups()
if "acc_type" in args:
match = re.search("at::acc_type<([a-zA-Z_]*), true>", args)
assert match is not None and len(match.groups()) == 1
new_type = match.group(1)
args = re.sub("at::acc_type<[a-zA-Z_]*, true>", new_type, args)
func_name_suffix = "_ACC_TYPE"
else:
func_name_suffix = ""
new_str_list.append(
f"{func_name}{func_name_suffix}({tensor}, {args}, {acc_nbits})"
)
else:
new_str_list.append(pta_str)
return new_str_list
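# A minimal sketch of the rewrite performed above, assuming a hypothetical
# func_name of "MAKE_PTA":
#   "momentum1.packed_accessor64<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>()"
# becomes
#   "MAKE_PTA_ACC_TYPE(momentum1, cache_t, 1, at::RestrictPtrTraits, 64)"
# Strings without "packed_accessor" pass through unchanged.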
def replace_pta_namespace(pta_str_list: List[str]) -> List[str]:
return [
pta_str.replace("at::PackedTensorAccessor", "pta::PackedTensorAccessor")
for pta_str in pta_str_list
]
def generate_forward_embedding_cuda(
template_filepath: str,
filename_format: str,
dense_options: List[bool],
nobag_options: List[bool],
vbe_options: List[bool],
) -> None:
template = env.get_template(template_filepath)
for dense in dense_options:
for weighted in [True, False]:
for nobag in nobag_options:
for vbe in vbe_options:
if (not nobag or (not weighted and not vbe)) and (
not dense or not vbe
):
dense_desc = f"{ 'dense' if dense else 'split'}"
weight_desc = f"{ 'weighted' if weighted else 'unweighted' }"
nobag_desc = f"{ '_nobag' if nobag else '' }"
vbe_desc = f"{ '_vbe' if vbe else '' }"
desc = (
f"{ dense_desc }_{ weight_desc }{ nobag_desc }{ vbe_desc }"
)
filename = filename_format.format(desc)
write(
filename,
template.render(
dense=dense,
weighted=weighted,
nobag=nobag,
vbe=vbe,
is_index_select=False,
),
)
print(f"[Forward Split]: {filename}")
def forward_split() -> None:
# Generate the forward splits
generate_forward_embedding_cuda(
"embedding_forward_split_template.cu",
"gen_embedding_forward_{}_codegen_cuda.cu",
dense_options=[True, False],
nobag_options=[False], # nobag is not used
vbe_options=[True, False],
)
# Generate the kernels for the forward splits
generate_forward_embedding_cuda(
"embedding_forward_split_kernel_template.cu",
"gen_embedding_forward_{}_kernel.cu",
dense_options=[True, False],
nobag_options=[True, False],
vbe_options=[True, False],
)
# Generate the kernels for the forward splits v2
generate_forward_embedding_cuda(
"embedding_forward_split_kernel_v2_template.cu",
"gen_embedding_forward_{}_v2_kernel.cu",
dense_options=[False], # dense is not supported
nobag_options=[False], # nobag is not supported
vbe_options=[False], # vbe is not supported
)
# Generate the small kernels (for nobag only) for the forward splits
template = env.get_template(
"embedding_forward_split_kernel_nobag_small_template.cu"
)
for dense in [True, False]:
wdesc = f"{ 'dense' if dense else 'split' }"
filename = f"gen_embedding_forward_{wdesc}_unweighted_nobag_kernel_small.cu"
write(filename, template.render(dense=dense, is_index_select=False))
print(f"[Forward Split]: {filename}")
# TODO: Separate this function into another codegen script
def index_select() -> None:
kwargs = make_args([(FLOAT, "unused")])
kwargs["args"] = kwargs["cuda"]
for templ_file, gen_file in [
(
"embedding_forward_split_template.cu",
"gen_batch_index_select_dim0_forward_codegen_cuda.cu",
),
(
"embedding_forward_split_kernel_template.cu",
"gen_batch_index_select_dim0_forward_kernel.cu",
),
(
"embedding_forward_split_kernel_nobag_small_template.cu",
"gen_batch_index_select_dim0_forward_kernel_small.cu",
),
(
"embedding_backward_split_template.cu",
"gen_batch_index_select_dim0_backward_codegen_cuda.cu",
),
(
"embedding_backward_split_kernel_cta_template.cu",
"gen_batch_index_select_dim0_backward_kernel_cta.cu",
),
(
"embedding_backward_split_kernel_warp_template.cu",
"gen_batch_index_select_dim0_backward_kernel_warp.cu",
),
]:
template = env.get_template(templ_file)
write(
gen_file,
template.render(
weighted=False,
dense=True,
vbe=False,
nobag=True,
is_index_select=True,
**kwargs,
),
)
template = env.get_template("embedding_backward_split_grad_template.cu")
write("gen_embedding_backward_split_grad.cu", template.render())
def forward_quantized() -> None:
@dataclass
class template_instance_params:
output_rows_per_thread: str
input_rows_in_flight: str
min_128b_rows: str
max_128b_rows: str
@dataclass
class elem_type:
enum_name: str
cpp_type_name: str
primitive_type: str
bit_width: int
template_params: List[template_instance_params]
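# Each parameter tuple below maps positionally onto template_instance_params,
# i.e. (output_rows_per_thread, input_rows_in_flight, min_128b_rows,
# max_128b_rows), with every value stringified for use in the template.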
type_map = {
"FP32": elem_type(
"FP32",
"float",
"FP",
32,
[
template_instance_params(*map(str, (2, 4, 0, 4))),
template_instance_params(*map(str, (2, 2, 4, 16))),
template_instance_params(*map(str, (1, 1, 16, 32))),
template_instance_params(*map(str, (1, 1, 32, 64))),
],
),
"FP16": elem_type(
"FP16",
"__half2",
"FP",
16,
[
template_instance_params(*map(str, (2, 8, 0, 2))),
template_instance_params(*map(str, (2, 8, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 8, 16))),
template_instance_params(*map(str, (2, 1, 16, 32))),
],
),
"FP8": elem_type(
"FP8",
"uint32_t",
"FP",
8,
[
template_instance_params(*map(str, (2, 8, 0, 1))),
template_instance_params(*map(str, (2, 4, 1, 2))),
template_instance_params(*map(str, (2, 4, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 4, 8))),
],
),
"INT8": elem_type(
"INT8",
"uint32_t",
"INT",
8,
[
template_instance_params(*map(str, (2, 8, 0, 1))),
template_instance_params(*map(str, (2, 4, 1, 2))),
template_instance_params(*map(str, (2, 4, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 8, 16))),
],
),
"INT4": elem_type(
"INT4",
"uint32_t",
"INT",
4,
[
template_instance_params(*map(str, (4, 8, 0, 1))),
template_instance_params(*map(str, (2, 8, 1, 2))),
template_instance_params(*map(str, (1, 4, 2, 4))),
template_instance_params(*map(str, (1, 4, 4, 8))),
],
),
"INT2": elem_type(
"INT2",
"uint32_t",
"INT",
2,
[
template_instance_params(*map(str, (2, 16, 0, 1))),
template_instance_params(*map(str, (2, 8, 1, 2))),
template_instance_params(*map(str, (2, 8, 2, 4))),
],
),
}
# Generate the CUDA nbit (kernel) templates
template = env.get_template(
"embedding_forward_quantized_split_nbit_kernel_template.cu"
)
for weighted in [True, False]:
for nobag in [True, False]:
if not nobag or not weighted:
for emb_weight_type in type_map.values():
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }"
filename = f"gen_embedding_forward_quantized_split_nbit_kernel_{ wdesc }_{ emb_weight_type.enum_name.lower() }_codegen_cuda.cu"
write(
filename,
template.render(
weighted=weighted,
nobag=nobag,
emb_weight_type=emb_weight_type,
),
)
print(f"[Forward Quantized]: {filename}")
# Generate the CUDA nbit (host) templates
template = env.get_template(
"embedding_forward_quantized_split_nbit_host_template.cu"
)
for weighted in [True, False]:
for nobag in [True, False]:
if not nobag or not weighted:
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }"
filename = f"gen_embedding_forward_quantized_split_nbit_host_{ wdesc }_codegen_cuda.cu"
write(
filename,
template.render(weighted=weighted, nobag=nobag, type_map=type_map),
)
print(f"[Forward Quantized]: {filename}")
# Generate the CPU templates
template = env.get_template("embedding_forward_quantized_cpu_template.cpp")
for weighted in [True, False]:
filename = f"gen_embedding_forward_quantized_{ 'weighted' if weighted else 'unweighted' }_codegen_cpu.cpp"
write(filename, template.render(weighted=weighted, type_map=type_map))
print(f"[Forward Quantized]: {filename}")
def backward_grad() -> None:
# Generate the common grad functions
template = env.get_template("embedding_backward_split_grad_template.cu")
write("gen_embedding_backward_split_grad.cu", template.render())
def backward_indices() -> None:
template = env.get_template("embedding_backward_split_indice_weights_template.cu")
src_cu = template.render()
write("gen_embedding_backward_split_indice_weights_codegen_cuda.cu", src_cu)
src_cu = template.render(dense=True)
write("gen_embedding_backward_dense_indice_weights_codegen_cuda.cu", src_cu)
def backward_dense() -> None:
generate(
optimizer="dense",
dense=True,
args=make_args(
[
(FLOAT, "unused"),
]
),
has_cpu_support=True,
has_gpu_support=True,
has_vbe_support=False,
)
def gen__init__py() -> None:
template = env.get_template("__init__.template")
src_py = template.render()
write("__init__.py", src_py)
def emb_codegen(
install_dir: Optional[str] = None, is_fbcode: Optional[bool] = None
) -> None:
if install_dir is not None and len(install_dir) != 0:
args.install_dir = install_dir
if is_fbcode is not None:
args.is_fbcode = is_fbcode
backward_grad()
# Generate forwards and specialized backwards
backward_indices()
backward_dense()
forward_quantized()
forward_split()
# Generate backwards and optimizers
generate(**(adagrad()))
generate(**(adam()))
generate(**(lamb()))
generate(**(lars_sgd()))
generate(**(partial_rowwise_adam()))
generate(**(partial_rowwise_lamb()))
generate(**(rowwise_adagrad()))
generate(**(approx_rowwise_adagrad()))
generate(**(rowwise_adagrad_with_weight_decay()))
generate(**(approx_rowwise_adagrad_with_weight_decay()))
generate(**(rowwise_adagrad_with_counter()))
generate(**(approx_rowwise_adagrad_with_counter()))
generate(**(rowwise_weighted_adagrad()))
generate(**(sgd()))
generate(**(approx_sgd()))
generate(**(none_optimizer()))
# Generate index_select ops using TBE backend
index_select()
gen__init__py()
def main() -> None:
emb_codegen()
if __name__ == "__main__":
main()
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
import argparse
import os
import re
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import jinja2
args: argparse.Namespace
_: List[str]
TENSOR: int
INT_TENSOR: int
LONG_TENSOR: int
INT: int
FLOAT: int
parser = argparse.ArgumentParser()
# By default, the source template files are in the same folder as
# embedding_backward_code_generator.py, and the install dir defaults to
# the current folder.
parser.add_argument("--install_dir", default=".", help="where to put generated files")
parser.add_argument("--opensource", action="store_false", dest="is_fbcode")
parser.add_argument("--is_rocm", action="store_true")
args, _ = parser.parse_known_args()
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
)
# Upper limit of "max_embedding_dim" (max_D):
# BT_block_size * sizeof(float) * 4 * kWarpSize * {{ kMaxVecsPerThread }}
# needs to be smaller than the allocated shared memory size (2/3 of 96 KB
# on V100 and 160 KB on A100):
# BT_block_size * 4 * 4 * 32 * (max_D // 128) <= 64 * 1024 (V100) or 96 * 1024 (A100)
# Since BT_block_size >= 1, max_D <= 16K (V100) or 24K (A100).
# Note that increasing max_D significantly increases compilation time.
env.globals["max_embedding_dim"] = 1024
# An optimization for ROCm
env.globals["items_per_warp"] = 128 if args.is_rocm is False else 256
env.globals["dense"] = False
def write(filename: str, s: str) -> None:
with open(os.path.join(args.install_dir, filename), "w") as f:
f.write(s)
def _arg_constructor(
type: str, name: str, gpu: bool = True, precision: int = 32
) -> str:
return (
f"{name}.packed_accessor{precision}<{type}, 1, at::RestrictPtrTraits>()"
if gpu
else f"{name}.accessor<{type}, 1>()"
)
def _arg(
type: str,
name: str,
gpu: bool = True,
precision: int = 32,
pass_by_ref: bool = False,
) -> str:
ref = "&" if pass_by_ref else ""
return (
f"at::PackedTensorAccessor{precision}<{type}, 1, at::RestrictPtrTraits>{ref} {name}"
if gpu
else f"at::TensorAccessor<{type}, 1>{ref} {name}"
)
def acc_cache_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor(
"at::acc_type<" + ("cache_t" if gpu else "scalar_t") + ", true>",
name,
gpu=gpu,
precision=64,
)
def acc_cache_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg(
"at::acc_type<" + ("cache_t" if gpu else "scalar_t") + ", true>",
name,
gpu=gpu,
precision=64,
pass_by_ref=pass_by_ref,
)
def long_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor("int64_t", name, gpu=gpu)
def long_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg("int64_t", name, gpu=gpu, pass_by_ref=pass_by_ref)
def int_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor("int32_t", name, gpu=gpu)
def int_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg("int32_t", name, gpu=gpu, pass_by_ref=pass_by_ref)
def tensor_arg(name: str) -> str:
return f"Tensor {name}"
def double_arg(name: str, default: float = 0.0) -> str:
return f"double {name} = {default}"
def double_arg_no_default(name: str) -> str:
return f"double {name}"
def float_arg(name: str, default: float = 0.0) -> str:
return f"float {name} = {default}"
def float_arg_no_default(name: str) -> str:
return f"float {name}"
def int64_arg(name: str, default: int = 0) -> str:
return f"int64_t {name} = {default}"
def int64_arg_no_default(name: str) -> str:
return f"int64_t {name}"
def int_arg(name: str, default: int = 0) -> str:
return f"int {name} = {default}"
# Format the macro call to generate pta::PackedTensorAccessors
def make_pta_acc_format(pta_str_list: List[str], func_name: str) -> List[str]:
new_str_list = []
for pta_str in pta_str_list:
if "packed_accessor" in pta_str:
match = re.search(
r"([a-zA-z0-9_]*)[.]packed_accessor([3|6][2|4])<(.*)>\(\)", pta_str
)
assert match is not None and len(match.groups()) == 3
tensor, acc_nbits, args = match.groups()
if "acc_type" in args:
match = re.search("at::acc_type<([a-zA-Z_]*), true>", args)
assert match is not None and len(match.groups()) == 1
new_type = match.group(1)
args = re.sub("at::acc_type<[a-zA-Z_]*, true>", new_type, args)
macro_name = "MAKE_PTA_ACC_WITH_NAME"
else:
macro_name = "MAKE_PTA_WITH_NAME"
args = args.replace(", at::RestrictPtrTraits", "")
new_str_list.append(
f"{macro_name}({func_name}, {tensor}, {args}, {acc_nbits})"
)
else:
new_str_list.append(pta_str)
return new_str_list
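# A minimal sketch of the rewrite performed above, assuming a hypothetical
# func_name of "kernel_name":
#   "indices.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>()"
#     -> "MAKE_PTA_WITH_NAME(kernel_name, indices, int64_t, 1, 32)"
#   "momentum1.packed_accessor64<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>()"
#     -> "MAKE_PTA_ACC_WITH_NAME(kernel_name, momentum1, cache_t, 1, at::RestrictPtrTraits, 64)"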
def replace_pta_namespace(pta_str_list: List[str]) -> List[str]:
return [
pta_str.replace("at::PackedTensorAccessor", "pta::PackedTensorAccessor")
for pta_str in pta_str_list
]
env.filters["make_pta_acc_format"] = make_pta_acc_format
env.filters["replace_pta_namespace"] = replace_pta_namespace
@dataclass
class Args:
split_kernel_args: List[str]
split_kernel_args_no_defaults: List[str]
split_kernel_arg_constructors: List[str]
split_cpu_kernel_args: List[str]
split_cpu_kernel_arg_constructors: List[str]
split_function_args: List[str]
split_function_args_no_defaults: List[str]
split_saved_tensors: List[str]
split_tensors: List[str]
saved_data: List[Tuple[str, str]]
split_function_arg_names: List[str]
split_function_schemas: List[str]
split_variables: List[str]
split_ref_kernel_args: List[str]
TENSOR, INT_TENSOR, LONG_TENSOR, INT, FLOAT = range(5)
def make_args(
arg_spec: List[Union[Tuple[int, str], Tuple[int, str, Union[float, int]]]]
) -> Dict[str, Any]:
def make_kernel_arg(
ty: int, name: str, default: Union[int, float, None], pass_by_ref: bool = False
) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg(x, pass_by_ref=pass_by_ref),
INT_TENSOR: lambda x: int_tensor_arg(x, pass_by_ref=pass_by_ref),
LONG_TENSOR: lambda x: long_tensor_arg(x, pass_by_ref=pass_by_ref),
INT: (lambda x: int64_arg(x, default=int(default)))
if default is not None
else int64_arg_no_default,
FLOAT: (lambda x: float_arg(x, default=default))
if default is not None
else float_arg_no_default,
}[ty](name)
def make_kernel_arg_constructor(ty: int, name: str) -> str:
return {
TENSOR: acc_cache_tensor_arg_constructor,
INT_TENSOR: int_tensor_arg_constructor,
LONG_TENSOR: long_tensor_arg_constructor,
INT: lambda x: x,
FLOAT: lambda x: x,
}[ty](name)
def make_cpu_kernel_arg(ty: int, name: str, default: Union[int, float]) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg(x, gpu=False),
INT_TENSOR: lambda x: int_tensor_arg(x, gpu=False),
LONG_TENSOR: lambda x: long_tensor_arg(x, gpu=False),
INT: lambda x: int64_arg(x, default=int(default)),
FLOAT: lambda x: float_arg(x, default=default),
}[ty](name)
def make_cpu_kernel_arg_constructor(ty: int, name: str) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg_constructor(x, gpu=False),
INT_TENSOR: lambda x: int_tensor_arg_constructor(x, gpu=False),
LONG_TENSOR: lambda x: long_tensor_arg_constructor(x, gpu=False),
INT: lambda x: x,
FLOAT: lambda x: x,
}[ty](name)
def make_function_arg(
ty: int, name: str, default: Optional[Union[int, float]]
) -> str:
return {
TENSOR: tensor_arg,
INT_TENSOR: tensor_arg,
LONG_TENSOR: tensor_arg,
INT: (lambda x: int64_arg(x, default=int(default)))
if default is not None
else int64_arg_no_default,
FLOAT: (lambda x: double_arg(x, default=default))
if default is not None
else double_arg_no_default,
}[ty](name)
def make_function_schema_arg(ty: int, name: str, default: Union[int, float]) -> str:
return {
TENSOR: tensor_arg,
INT_TENSOR: tensor_arg,
LONG_TENSOR: tensor_arg,
INT: lambda x: int_arg(x, default=int(default)),
FLOAT: lambda x: float_arg(x, default=default),
}[ty](name)
def make_ivalue_cast(ty: int) -> str:
return {INT: "toInt", FLOAT: "toDouble"}[ty]
def make_args_for_compute_device(
split_arg_spec: List[Tuple[int, str, Union[int, float]]]
) -> Args:
return Args(
split_kernel_args=[
make_kernel_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_kernel_args_no_defaults=[
make_kernel_arg(ty, name, None) for (ty, name, _) in split_arg_spec
],
split_kernel_arg_constructors=[
make_kernel_arg_constructor(ty, name)
for (ty, name, default) in split_arg_spec
],
split_cpu_kernel_args=[
make_cpu_kernel_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_cpu_kernel_arg_constructors=[
make_cpu_kernel_arg_constructor(ty, name)
for (ty, name, default) in split_arg_spec
],
split_function_args=[
make_function_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_function_args_no_defaults=[
make_function_arg(ty, name, None)
for (ty, name, default) in split_arg_spec
],
split_tensors=[
name for (ty, name, default) in augmented_arg_spec if ty == TENSOR
],
split_saved_tensors=[
name
for (ty, name, default) in split_arg_spec
if ty in (TENSOR, INT_TENSOR, LONG_TENSOR)
],
saved_data=[
(name, make_ivalue_cast(ty))
for (ty, name, default) in augmented_arg_spec
if ty != TENSOR
],
split_function_arg_names=[name for (ty, name, default) in split_arg_spec],
split_function_schemas=[
make_function_schema_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_variables=["Variable()" for _ in split_arg_spec],
split_ref_kernel_args=[
make_kernel_arg(ty, name, default, pass_by_ref=True)
for (ty, name, default) in split_arg_spec
],
)
DEFAULT_ARG_VAL = 0
augmented_arg_spec = [
item if len(item) == 3 else (*item, DEFAULT_ARG_VAL) for item in arg_spec
]
split_arg_spec = []
for (ty, arg, default) in augmented_arg_spec:
if ty in (FLOAT, INT):
split_arg_spec.append((ty, arg, default))
else:
assert ty == TENSOR
split_arg_spec.extend(
[
(TENSOR, f"{arg}_host", default),
(INT_TENSOR, f"{arg}_placements", default),
(LONG_TENSOR, f"{arg}_offsets", default),
]
)
cpu = make_args_for_compute_device(split_arg_spec)
split_arg_spec = []
for (ty, arg, default) in augmented_arg_spec:
if ty in (FLOAT, INT):
split_arg_spec.append((ty, arg, default))
else:
assert ty == TENSOR
split_arg_spec.extend(
[
(TENSOR, f"{arg}_dev", default),
(TENSOR, f"{arg}_uvm", default),
(INT_TENSOR, f"{arg}_placements", default),
(LONG_TENSOR, f"{arg}_offsets", default),
]
)
cuda = make_args_for_compute_device(split_arg_spec)
return {"cpu": cpu, "cuda": cuda}
def adagrad() -> Dict[str, Any]:
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x += grad.acc.x * grad.acc.x;
m_t.acc.y += grad.acc.y * grad.acc.y;
m_t.acc.z += grad.acc.z * grad.acc.z;
m_t.acc.w += grad.acc.w * grad.acc.w;
m_t.store(&momentum1[idx * D + d]);
weight_new.acc.x -= learning_rate * grad.acc.x / (sqrtf(m_t.acc.x) + eps);
weight_new.acc.y -= learning_rate * grad.acc.y / (sqrtf(m_t.acc.y) + eps);
weight_new.acc.z -= learning_rate * grad.acc.z / (sqrtf(m_t.acc.z) + eps);
weight_new.acc.w -= learning_rate * grad.acc.w / (sqrtf(m_t.acc.w) + eps);
"""
split_weight_update_cpu = """
for (int64_t d = 0; d < D; ++d) {
momentum1_host[embedding_begin + d] +=
grad_buffer[d] * grad_buffer[d];
host_weights_data[embedding_begin + d] -=
learning_rate * grad_buffer[d] /
(sqrt(momentum1_host[embedding_begin + d]) + eps);
}
"""
return {
"optimizer": "adagrad",
"args": make_args(
[(TENSOR, "momentum1"), (FLOAT, "eps"), (FLOAT, "learning_rate")]
),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
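# Illustrative note: the backward code generator shown earlier invokes
# generate(**adagrad()); via make_args above, the "momentum1" TENSOR entry then
# surfaces as momentum1_dev/_uvm/_placements/_offsets parameters on the CUDA
# ops and momentum1_host/_placements/_offsets on the CPU ops.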
def table_info_precomputation(momentum_prefix: str = "momentum1") -> str:
template = """
// table_begin -> (E, D, {momentum_prefix}_row_begin).
std::map<int64_t, std::tuple<int64_t, int64_t, int64_t>> table_info_map;
for (int64_t t = 0; t < T; ++t) {
const auto D = D_offsets_data[t + 1] - D_offsets_data[t];
const auto table_begin = weights_offsets_data[t];
const auto {momentum_prefix}_row_begin = {momentum_prefix}_offsets_data[t];
table_info_map[table_begin] = std::make_tuple(0, D, {momentum_prefix}_row_begin);
}
int64_t previous_table_begin = host_weights.numel();
// NOTE: table_info_map is sorted by table_begin!
for (auto it = table_info_map.rbegin(); it != table_info_map.rend(); ++it) {
const auto D = std::get<1>(it->second);
// Calculates number of rows of each table.
std::get<0>(it->second) = (previous_table_begin - it->first) / D;
previous_table_begin = it->first;
}
"""
return template.replace("{momentum_prefix}", momentum_prefix)
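# Design note (an inference, not from the original comments): the snippet above
# contains literal C++ braces, so plain str.format() would misparse it; the
# momentum prefix is therefore spliced in with str.replace() on the
# "{momentum_prefix}" placeholder instead.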
def rowwise_adagrad() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_post_update = """
if (max_norm > 0.0) {
CUDA_KERNEL_ASSERT(!(std::is_same<emb_t, uint8_t>::value && !cache_weights)); // not supported for uint8 yet
// compute weight norm
at::acc_type<cache_t, true> weight_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
weight_sum_square += weight_new.acc.x * weight_new.acc.x + weight_new.acc.y * weight_new.acc.y + weight_new.acc.z * weight_new.acc.z + weight_new.acc.w * weight_new.acc.w;
}
const at::acc_type<cache_t, true> weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_square, shfl_sync_mask));
// scale by max_norm if weight_norm exceeds max_norm
if (threadIdx.x == 0) {
multiplier = weight_norm > max_norm ? max_norm / weight_norm : 1.0f;
}
multiplier = SHFL_SYNC(multiplier, 0);
if (weight_norm > max_norm) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
weight_new.acc.x *= multiplier;
weight_new.acc.y *= multiplier;
weight_new.acc.z *= multiplier;
weight_new.acc.w *= multiplier;
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
}
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
auto gx = grad_sum[i].acc.x;
auto gy = grad_sum[i].acc.y;
auto gz = grad_sum[i].acc.z;
auto gw = grad_sum[i].acc.w;
if (weight_decay_mode == 1) {
// L2 regularization
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
gx += weight_decay * weight.acc.x;
gy += weight_decay * weight.acc.y;
gz += weight_decay * weight.acc.z;
gw += weight_decay * weight.acc.w;
}
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
auto grad = grad_buffer[d];
if (weight_decay_mode == 1) {
// L2 regularization
grad += weight_decay * host_weights_data[embedding_begin + d];
}
g_local_sum_square += grad * grad;
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
at::acc_type<grad_t, true> correction;
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] = correction * host_weights_data[embedding_begin + d] - grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_adagrad",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
(FLOAT, "max_norm", 0.0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": split_post_update,
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": True,
}
def approx_rowwise_adagrad() -> Dict[str, Any]:
rowwise_adagrad_args = rowwise_adagrad()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": rowwise_adagrad_args["split_precomputation"],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_args["split_weight_update_cpu"],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def rowwise_adagrad_with_weight_decay() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
auto gx = grad_sum[i].acc.x;
auto gy = grad_sum[i].acc.y;
auto gz = grad_sum[i].acc.z;
auto gw = grad_sum[i].acc.w;
if (weight_decay_mode == 1) {
// L2 regularization
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
gx += weight_decay * weight.acc.x;
gy += weight_decay * weight.acc.y;
gz += weight_decay * weight.acc.z;
gw += weight_decay * weight.acc.w;
}
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
auto grad = grad_buffer[d];
if (weight_decay_mode == 1) {
// L2 regularization
grad += weight_decay * host_weights_data[embedding_begin + d];
}
g_local_sum_square += grad * grad;
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
at::acc_type<grad_t, true> correction;
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] = correction * host_weights_data[embedding_begin + d] - grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_adagrad_with_weight_decay",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def approx_rowwise_adagrad_with_weight_decay() -> Dict[str, Any]:
rowwise_adagrad_with_weight_decay_args = rowwise_adagrad_with_weight_decay()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad_with_weight_decay",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": rowwise_adagrad_with_weight_decay_args[
"split_precomputation"
],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_with_weight_decay_args[
"split_weight_update_cpu"
],
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def rowwise_adagrad_with_counter() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = (exp_reg_correction * weight_new.acc.x - adjusted_multiplier * grad.acc.x);
weight_new.acc.y = (exp_reg_correction * weight_new.acc.y - adjusted_multiplier * grad.acc.y);
weight_new.acc.z = (exp_reg_correction * weight_new.acc.z - adjusted_multiplier * grad.acc.z);
weight_new.acc.w = (exp_reg_correction * weight_new.acc.w - adjusted_multiplier * grad.acc.w);
"""
split_precomputation = """
at::acc_type<cache_t, true> freq = 1.0;
at::acc_type<cache_t, true> l2_wd = 0.0;
at::acc_type<cache_t, true> tail_id_threshold_val = tail_id_threshold;
CUDA_KERNEL_ASSERT(max_counter > 0.0); // avoid divide by zero error
if (is_tail_id_thresh_ratio == 1){
tail_id_threshold_val = floorf(tail_id_threshold * max_counter);
}
if (counter_halflife > 0 && threadIdx.x == 0) {
// if id occurs multiple times in a batch, iter_delta=1
const auto iter_delta = prev_iter[idx] == 0 ? 1.0 : iter * 1.0 - prev_iter[idx];
prev_iter[idx] = iter * 1.0;
const auto counter_log_rho = logf(2.0) / counter_halflife;
row_counter[idx] = 1.0 + expf(-iter_delta * counter_log_rho) * row_counter[idx];
freq = counter_halflife / row_counter[idx];
if (weight_decay_mode == 1) {
// L2 regularization
l2_wd = 1.0;
}
}
freq = SHFL_SYNC(freq, 0);
l2_wd = SHFL_SYNC(l2_wd, 0);
tail_id_threshold_val = SHFL_SYNC(tail_id_threshold_val, 0);
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
auto gx = grad_sum[i].acc.x + l2_wd * freq * weight_decay * weight.acc.x;
auto gy = grad_sum[i].acc.y + l2_wd * freq * weight_decay * weight.acc.y;
auto gz = grad_sum[i].acc.z + l2_wd * freq * weight_decay * weight.acc.z;
auto gw = grad_sum[i].acc.w + l2_wd * freq * weight_decay * weight.acc.w;
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> adjusted_multiplier;
at::acc_type<cache_t, true> exp_reg_correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
adjusted_multiplier = multiplier;
if ( learning_rate_mode >=0 ) {
if (adjustment_iter <= 0 || (adjustment_iter > 0 && iter > adjustment_iter)) {
if (row_counter[idx] > tail_id_threshold_val) {
if ( learning_rate_mode == 0 ) {
adjusted_multiplier = multiplier * max(min(powf(max_counter/(row_counter[idx] + 1.0), adjustment_ub), 10.0), 1.0);
} else if ( learning_rate_mode == 1 ) {
adjusted_multiplier = multiplier * min(max(powf((row_counter[idx] + 1.0)/max_counter, adjustment_ub), 0.1), 1.0);
} else if (learning_rate_mode == 2) {
adjusted_multiplier = learning_rate / (sqrtf(adjustment_ub*row_counter[idx]) + eps);
}
}
}
}
exp_reg_correction = 1.0;
if (adjustment_iter <= 0 || (adjustment_iter > 0 && iter > adjustment_iter)) {
if (weight_decay_mode == 2) {
// Decoupled weight decay
exp_reg_correction = 1.0 - freq * weight_decay * learning_rate;
} else if (weight_decay_mode == 1) {
// L2 regularization (coupled wd)
exp_reg_correction = 1.0 - freq * weight_decay * multiplier;
}
}
}
multiplier = SHFL_SYNC(multiplier, 0);
adjusted_multiplier = SHFL_SYNC(adjusted_multiplier, 0);
exp_reg_correction = SHFL_SYNC(exp_reg_correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
g_local_sum_square += grad_buffer[d] * grad_buffer[d];
}
auto g_avg_square = g_local_sum_square / D;
auto offset_idx = momentum1_offsets_data[feature_begin] + idx;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[offset_idx] + g_avg_square;
momentum1_host[offset_idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
const auto iter_delta = iter * 1.0 - prev_iter_host[offset_idx];
prev_iter_host[offset_idx] = iter * 1.0;
const auto exp_reg = 1.0 / (weight_decay * multiplier + 1.0);
const auto exp_reg_correction = powf(exp_reg, iter_delta);
for (int64_t d = 0; d < D; ++d) {
const auto weight = host_weights_data[embedding_begin + d];
host_weights_data[embedding_begin + d] = exp_reg_correction * weight - exp_reg * multiplier * grad_buffer[d];
}
"""
return {
"optimizer": "rowwise_adagrad_with_counter",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "prev_iter"),
(TENSOR, "row_counter"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "iter"),
(INT, "counter_halflife", -1),
(INT, "adjustment_iter", -1),
(FLOAT, "adjustment_ub", 1.0),
(INT, "learning_rate_mode", -1),
(INT, "weight_decay_mode", 1),
(INT, "grad_sum_decay", -1),
(FLOAT, "max_counter"),
(FLOAT, "tail_id_threshold", 0.0),
(INT, "is_tail_id_thresh_ratio", 0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
def approx_rowwise_adagrad_with_counter() -> Dict[str, Any]:
rowwise_adagrad_with_counter_args = rowwise_adagrad_with_counter()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad_with_counter",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "prev_iter"),
(TENSOR, "row_counter"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "iter"),
(INT, "counter_halflife", -1),
(INT, "adjustment_iter", -1),
(FLOAT, "adjustment_ub", 1.0),
(INT, "learning_rate_mode", -1),
(INT, "weight_decay_mode", 1),
(INT, "grad_sum_decay", -1),
(FLOAT, "max_counter"),
(FLOAT, "tail_id_threshold", 0.0),
(INT, "is_tail_id_thresh_ratio", 0),
]
),
"split_precomputation": rowwise_adagrad_with_counter_args[
"split_precomputation"
],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_with_counter_args[
"split_weight_update_cpu"
],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def rowwise_weighted_adagrad() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
auto gx = grad_sum[i].acc.x + weight_decay * weight.acc.x;
auto gy = grad_sum[i].acc.y + weight_decay * weight.acc.y;
auto gz = grad_sum[i].acc.z + weight_decay * weight.acc.z;
auto gw = grad_sum[i].acc.w + weight_decay * weight.acc.w;
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> lambda = sqrtf(iter + 1);
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + lambda * g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate * lambda / (cbrtf(new_sum_square_grads) + eps);
correction = 1.0 - multiplier * weight_decay;
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
// weight_decay not supported for cpu version
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
g_local_sum_square += grad_buffer[d] * grad_buffer[d];
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> lambda = sqrtf(iter + 1);
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + lambda * g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate * lambda / (cbrtf(new_sum_square_grads) + eps);
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] -= grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_weighted_adagrad",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
def sgd() -> Dict[str, Any]:
split_weight_update = """
weight_new.fma_(grad, -learning_rate);
"""
split_weight_update_cpu = """
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] -= learning_rate * grad_buffer[d];
}
"""
return {
"optimizer": "sgd",
"args": make_args([(FLOAT, "learning_rate")]),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": True,
}
def approx_sgd() -> Dict[str, Any]:
sgd_args = sgd()
approx_split_weight_update = """
// approx_sgd not supported for GPU.
// Just do the same thing as exact sgd to avoid unused variable warning.
weight_new.fma_(grad, -learning_rate);
assert(false); // approx SGD is not supported on GPU
"""
return {
"optimizer": "approx_sgd",
"args": make_args([(FLOAT, "learning_rate")]),
"split_precomputation": "",
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": sgd_args["split_weight_update_cpu"],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def lamb() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> rtw_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll 1
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row.load(d, qparams);
Vec4T<at::acc_type<cache_t, true>> m1(&momentum1[idx * D + d]);
m1.acc.x = beta1 * m1.acc.x + (1.0 - beta1) * grad_sum[i].acc.x;
m1.acc.y = beta1 * m1.acc.y + (1.0 - beta1) * grad_sum[i].acc.y;
m1.acc.z = beta1 * m1.acc.z + (1.0 - beta1) * grad_sum[i].acc.z;
m1.acc.w = beta1 * m1.acc.w + (1.0 - beta1) * grad_sum[i].acc.w;
m1.store(&momentum1[idx * D + d]);
Vec4T<at::acc_type<cache_t, true>> m2(&momentum2[idx * D + d]);
m2.acc.x = beta2 * m2.acc.x + (1.0 - beta2) * grad_sum[i].acc.x * grad_sum[i].acc.x;
m2.acc.y = beta2 * m2.acc.y + (1.0 - beta2) * grad_sum[i].acc.y * grad_sum[i].acc.y;
m2.acc.z = beta2 * m2.acc.z + (1.0 - beta2) * grad_sum[i].acc.z * grad_sum[i].acc.z;
m2.acc.w = beta2 * m2.acc.w + (1.0 - beta2) * grad_sum[i].acc.w * grad_sum[i].acc.w;
m2.store(&momentum2[idx * D + d]);
// now, we are finished with grad_sum. We can *reuse* grad_sum to store r_t + weight_decay * weight;
grad_sum[i].acc.x = (m1.acc.x / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.x / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.x;
grad_sum[i].acc.y = (m1.acc.y / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.y / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.y;
grad_sum[i].acc.z = (m1.acc.z / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.z / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.z;
grad_sum[i].acc.w = (m1.acc.w / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.w / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.w;
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
rtw_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq, shfl_sync_mask));
const auto rtw_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(rtw_sum_sq, shfl_sync_mask));
const auto true_ratio = weight_norm / rtw_norm;
"""
split_weight_update = """
weight_new.fma_(grad, -learning_rate * true_ratio);
"""
split_weight_update_cpu = ""
return {
"optimizer": "lamb",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def partial_rowwise_lamb() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
g_local_sum_square += grad_sum[i].acc.x * grad_sum[i].acc.x +
grad_sum[i].acc.y * grad_sum[i].acc.y +
grad_sum[i].acc.z * grad_sum[i].acc.z +
grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> m2;
if (threadIdx.x == 0) {
m2 = beta2 * momentum2[idx] + (1.0 - beta2) * g_avg_square;
momentum2[idx] = m2;
}
m2 = SHFL_SYNC(m2, 0);
at::acc_type<cache_t, true> m2_hat = 1.0 / (sqrtf((m2 / (1.0 - powf(beta2, iter)))) + eps);
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> rtw_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> m1(&momentum1[idx * D + d]);
m1.acc.x = beta1 * m1.acc.x + (1.0 - beta1) * grad_sum[i].acc.x;
m1.acc.y = beta1 * m1.acc.y + (1.0 - beta1) * grad_sum[i].acc.y;
m1.acc.z = beta1 * m1.acc.z + (1.0 - beta1) * grad_sum[i].acc.z;
m1.acc.w = beta1 * m1.acc.w + (1.0 - beta1) * grad_sum[i].acc.w;
m1.store(&momentum1[idx * D + d]);
// now, we are finished with grad_sum. We can *reuse* grad_sum to store r_t + weight_decay * weight;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row.load(d, qparams);
grad_sum[i].acc.x = (m1.acc.x / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.x;
grad_sum[i].acc.y = (m1.acc.y / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.y;
grad_sum[i].acc.z = (m1.acc.z / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.z;
grad_sum[i].acc.w = (m1.acc.w / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.w;
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
rtw_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq));
const auto rtw_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(rtw_sum_sq));
const auto true_ratio = weight_norm / rtw_norm;
"""
split_weight_update = """
weight_new.fma_(grad, -learning_rate * true_ratio);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "partial_rowwise_lamb",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def adam() -> Dict[str, Any]:
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x *= beta1;
m_t.acc.y *= beta1;
m_t.acc.z *= beta1;
m_t.acc.w *= beta1;
m_t.fma_(grad, 1.0 - beta1);
m_t.store(&momentum1[idx * D + d]);
Vec4T<cache_t> v_t(&momentum2[idx * D + d]);
v_t.acc.x *= beta2;
v_t.acc.y *= beta2;
v_t.acc.z *= beta2;
v_t.acc.w *= beta2;
grad.acc.x *= grad.acc.x;
grad.acc.y *= grad.acc.y;
grad.acc.z *= grad.acc.z;
grad.acc.w *= grad.acc.w;
v_t.fma_(grad, 1.0 - beta2);
v_t.store(&momentum2[idx * D + d]);
weight_new.acc.x -= learning_rate * (m_t.acc.x / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.x / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.x);
weight_new.acc.y -= learning_rate * (m_t.acc.y / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.y / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.y);
weight_new.acc.z -= learning_rate * (m_t.acc.z / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.z / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.z);
weight_new.acc.w -= learning_rate * (m_t.acc.w / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.w / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.w);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "adam",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def partial_rowwise_adam() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
g_local_sum_square += grad_sum[i].acc.x * grad_sum[i].acc.x +
grad_sum[i].acc.y * grad_sum[i].acc.y +
grad_sum[i].acc.z * grad_sum[i].acc.z +
grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square) / D;
at::acc_type<cache_t, true> v_hat_t;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> v_t = momentum2[idx] * beta2 + g_avg_square * (1.0 - beta2);
momentum2[idx] = v_t;
v_hat_t = v_t / (1.0 - powf(beta2, iter));
}
v_hat_t = SHFL_SYNC(v_hat_t, 0);
"""
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x *= beta1;
m_t.acc.y *= beta1;
m_t.acc.z *= beta1;
m_t.acc.w *= beta1;
m_t.fma_(grad, 1.0 - beta1);
m_t.store(&momentum1[idx * D + d]);
weight_new.acc.x -= learning_rate * (m_t.acc.x / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.x);
weight_new.acc.y -= learning_rate * (m_t.acc.y / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.y);
weight_new.acc.z -= learning_rate * (m_t.acc.z / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.z);
weight_new.acc.w -= learning_rate * (m_t.acc.w / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.w);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "partial_rowwise_adam",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def lars_sgd() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> grad_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t,true>> weight = weight_row.load(d, qparams);
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
grad_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq));
const auto grad_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(grad_sum_sq));
const at::acc_type<cache_t, true> adjusted_lr = learning_rate * eta * weight_norm / (grad_norm + weight_decay * weight_norm);
"""
split_weight_update = """
Vec4T<cache_t> m1(&momentum1[idx * D + d]);
m1.acc.x = momentum * m1.acc.x + adjusted_lr * (grad.acc.x + weight_decay * weight_new.acc.x);
m1.acc.y = momentum * m1.acc.y + adjusted_lr * (grad.acc.y + weight_decay * weight_new.acc.y);
m1.acc.z = momentum * m1.acc.z + adjusted_lr * (grad.acc.z + weight_decay * weight_new.acc.z);
m1.acc.w = momentum * m1.acc.w + adjusted_lr * (grad.acc.w + weight_decay * weight_new.acc.w);
m1.store(&momentum1[idx * D + d]);
weight_new.acc.x -= m1.acc.x;
weight_new.acc.y -= m1.acc.y;
weight_new.acc.z -= m1.acc.z;
weight_new.acc.w -= m1.acc.w;
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "lars_sgd",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "learning_rate"),
(FLOAT, "eta"),
(FLOAT, "momentum"),
(FLOAT, "weight_decay"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def none_optimizer() -> Dict[str, Any]:
return {
"optimizer": "none",
"dense": False,
"args": make_args(
[
(INT, "total_hash_size"),
(INT, "total_unique_indices"),
]
),
# Generate only GPU code
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
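# Illustrative sketch only (not part of the original generator): each of the
# optimizer functions above returns a kwargs dict that generate(), defined
# earlier in this file, consumes. A hypothetical dispatch helper might look
# like this; the real invocation site lives elsewhere in the code generator.
def _example_generate_optimizers() -> None:
    for optimizer_def in (adam, partial_rowwise_adam, lars_sgd, none_optimizer):
        generate(**optimizer_def())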
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple
import torch
from torch import nn
class SplitEmbeddingOptimizerParams(NamedTuple):
weights_dev: nn.Parameter
# TODO: Enable weights_uvm and weights_lxu_cache support
# weights_uvm: nn.Parameter
# weights_lxu_cache: nn.Parameter
class SplitEmbeddingArgs(NamedTuple):
weights_placements: torch.Tensor
weights_offsets: torch.Tensor
max_D: int
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
import torch
class VBEMetadata(NamedTuple):
B_offsets: Optional[torch.Tensor]
output_offsets_feature_rank: Optional[torch.Tensor]
B_offsets_rank_per_feature: Optional[torch.Tensor]
max_B_feature_rank: int = -1
max_B: int = -1
output_size: int = -1
class CommonArgs(NamedTuple):
placeholder_autograd_tensor: torch.Tensor
dev_weights: torch.Tensor
host_weights: torch.Tensor
uvm_weights: torch.Tensor
lxu_cache_weights: torch.Tensor
weights_placements: torch.Tensor
weights_offsets: torch.Tensor
D_offsets: torch.Tensor
total_D: int
max_D: int
hash_size_cumsum: torch.Tensor
total_hash_size_bits: int
indices: torch.Tensor
offsets: torch.Tensor
pooling_mode: int
indice_weights: Optional[torch.Tensor]
feature_requires_grad: Optional[torch.Tensor]
lxu_cache_locations: torch.Tensor
output_dtype: int
vbe_metadata: VBEMetadata
is_experimental: bool
class OptimizerArgs(NamedTuple):
stochastic_rounding: bool
gradient_clipping: bool
max_gradient: float
learning_rate: float
eps: float
beta1: float
beta2: float
weight_decay: float
weight_decay_mode: int
eta: float
momentum: float
counter_halflife: int
adjustment_iter: int
adjustment_ub: float
learning_rate_mode: int
grad_sum_decay: int
tail_id_threshold: float
is_tail_id_thresh_ratio: int
total_hash_size: int # Required for OptimType.NONE
class Momentum(NamedTuple):
dev: torch.Tensor
host: torch.Tensor
uvm: torch.Tensor
offsets: torch.Tensor
placements: torch.Tensor
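# Illustrative sketch only (hypothetical helper, not part of the original
# module): when variable-batch-size embeddings (VBE) are not in use, the
# metadata can presumably be built with no offset tensors, keeping the -1
# sentinel defaults declared above.
def _example_non_vbe_metadata() -> VBEMetadata:
    return VBEMetadata(
        B_offsets=None,
        output_offsets_feature_rank=None,
        B_offsets_rank_per_feature=None,
    )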
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import subprocess
def configureDoxyfile(input_dir, output_dir):
with open("Doxyfile.in", "r") as file:
filedata = file.read()
filedata = filedata.replace("@DOXYGEN_INPUT_DIR@", input_dir)
filedata = filedata.replace("@DOXYGEN_OUTPUT_DIR@", output_dir)
with open("Doxyfile", "w") as file:
file.write(filedata)
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get("READTHEDOCS", None) == "True"
breathe_projects = {}
if read_the_docs_build:
input_dir = "../include/fbgemm"
output_dir = "build"
configureDoxyfile(input_dir, output_dir)
subprocess.call("doxygen", shell=True)
breathe_projects["fbgemm"] = output_dir + "/xml"
# -- Project information -----------------------------------------------------
project = "FBGEMM"
copyright = "2020, Facebook Inc."
author = "Facebook Inc."
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# ...
extensions = ["breathe"]
# ...
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Breathe Configuration
breathe_default_project = "FBGEMM"
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## This is a helper script that generates simple Caffe2 models.
from caffe2.proto import caffe2_pb2
from caffe2.python import utils
# Define a weights network
weights = caffe2_pb2.NetDef()
weights.name = "init"
op = caffe2_pb2.OperatorDef()
op.type = "fake_data_provider"
op.output.extend(["data"])
weights.op.extend([op])
weights.external_output.extend(op.output)
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["fc_w"])
op.arg.extend([utils.MakeArgument("shape", [1, 4])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["fc_b"])
op.arg.extend([utils.MakeArgument("shape", [1, 4])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
# Define an inference net
net = caffe2_pb2.NetDef()
net.name = "predict"
op = caffe2_pb2.OperatorDef()
op.type = "fake_operator"
op.input.extend(["data"])
op.output.extend(["fake_out"])
net.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "FC"
op.input.extend(["fake_out"])
op.input.extend(["fc_w"])
op.input.extend(["fc_b"])
op.output.extend(["fc_out"])
net.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "Relu"
op.input.extend(["fc_out"])
op.output.extend(["relu_out"])
net.op.extend([op])
# Relu out is what we want
net.external_output.extend(op.output)
# We want DCE to remove this one
op = caffe2_pb2.OperatorDef()
op.type = "useless_operator"
op.input.extend(["fake_out"])
op.output.extend(["useless_out"])
net.op.extend([op])
with open("predictNet.pb", "wb") as f:
f.write(net.SerializeToString())
with open("initNet.pb", "wb") as f:
f.write(weights.SerializeToString())
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
from google.protobuf import text_format
def fix_tensor_fills(init_net_file):
init_net_pb = open(init_net_file, "rb").read()
init_net = caffe2_pb2.NetDef()
init_net.ParseFromString(init_net_pb)
for op in init_net.op:
if any("indices" in x for x in op.output):
op.type = "GivenTensorInt64Fill"
elif any("lengths" in x for x in op.output):
op.type = "GivenTensorIntFill"
open(init_net_file + "txt", "w").write(text_format.MessageToString(init_net))
open(init_net_file, "wb").write(init_net.SerializeToString())
def read_init_net_pbtxt(init_net_file):
init_net_txt = open(init_net_file, "r").read()
init_net = caffe2_pb2.NetDef()
text_format.Merge(init_net_txt, init_net)
return init_net
def read_init_net(init_net_file):
init_net_pb = open(init_net_file, "rb").read()
init_net = caffe2_pb2.NetDef()
init_net.ParseFromString(init_net_pb)
return init_net
def read_predict_net(predict_net_file):
predict_net_txt = open(predict_net_file, "r").read()
predict_net = caffe2_pb2.NetDef()
predict_net.name = "the_model"
text_format.Merge(predict_net_txt, predict_net)
return predict_net
def run(predict_net, init_net):
workspace.ResetWorkspace()
workspace.RunNetOnce(init_net)
workspace.CreateNet(predict_net)
workspace.RunNet(predict_net.name)
out = workspace.FetchBlob(predict_net.external_output[0])
print(out)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("predict_net", default="predict_net.pbtxt", nargs="?")
parser.add_argument("init_net", default="init_net.pb", nargs="?")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
init_net = read_init_net(args.init_net)
predict_net = read_predict_net(args.predict_net)
run(predict_net, init_net)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch.onnx
import torchvision
from torch.autograd import Variable
# Export ONNX model from PyTorch
# Refer to https://pytorch.org/docs/stable/onnx.html
class PyTorchPretrainedModel:
def __init__(self, model_name):
self.model_name = model_name
method_to_call = getattr(torchvision.models, self.model_name)
self.model = method_to_call(pretrained=True)
self.model_parameters_num = len(list(self.model.state_dict()))
def export_onnx_model(
self, input_name, output_name, batch_size, model_path, verbose
):
dummy_input = Variable(torch.randn(batch_size, 3, 224, 224))
input_names = [input_name] + [
"learned_%d" % i for i in range(self.model_parameters_num)
]
output_names = [output_name]
torch.onnx.export(
self.model,
dummy_input,
model_path,
verbose=verbose,
input_names=input_names,
output_names=output_names,
)
if __name__ == "__main__":
# For more pretrained model in PyTorch, refer to:
# https://pytorch.org/docs/stable/torchvision/models.html
parser = argparse.ArgumentParser("ONNX model exported from PyTorch.")
parser.add_argument("--model_name", type=str, default="resnet18")
parser.add_argument("--model_path", type=str, default="resnet18.onnx")
parser.add_argument("--model_input_name", type=str, default="data")
parser.add_argument("--model_output_name", type=str, default="output")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
pytorch_model = PyTorchPretrainedModel(args.model_name)
pytorch_model.export_onnx_model(
args.model_input_name,
args.model_output_name,
args.batch_size,
args.model_path,
args.verbose,
)
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
from collections import defaultdict
from operator import attrgetter, itemgetter
import numpy
def formatUs(time):
"""Format human readable time (input in us)."""
if time < 1000:
return f"{time:.2f} us"
time = time / 1000
if time < 1000:
return f"{time:.2f} ms"
time = time / 1000
return f"{time:.2f} s"
class Event:
"""Class to hold TraceEvents, matches glow::TraceEvent."""
def __init__(self, name, start, end, optype):
self.name = name
self.start = start
self.end = end
self.optype = optype
self.children = []
self.child_time = 0
def __repr__(self):
return f"Event({self.name}, {self.start}, {self.end}, {self.optype})"
def printTree(self, tabs):
"""Pretty print the tree."""
indent = tabs * "\t"
print(f"{indent}{self.name} ({self.optype})")
for c in self.children:
c.printTree(tabs + 1)
def totalOverlap(self, event):
"""Returns True if this Event completely incloses the provided event."""
return self.start <= event.start and self.end >= event.end
def addChild(self, event):
"""Add an enclosed event."""
self.children.append(event)
def updateChildTime(self):
"""Determine the total time cost of all children."""
self.child_time = 0
for child in self.children:
child.updateChildTime()
self.child_time += child.end - child.start
def selfTime(self):
"""Return this Event's time cost above the sum of its children."""
return (self.end - self.start) - self.child_time
def loadEvents(filename, runtimeEvents, fixedEvent, skip):
"""Load the json trace file and create Events."""
trace = None
with open(filename) as f:
trace = json.load(f)
events = []
partialEvents = {}
for line in trace:
if "name" in line:
name = line["name"]
evtype = line["ph"]
start = int(line["ts"])
optype = "runtime"
if "args" in line:
if "type" in line["args"]:
optype = line["args"]["type"]
elif "kind" in line["args"]:
optype = line["args"]["kind"]
# If we're looking for a single event, skip others.
if (
fixedEvent
and not re.match(fixedEvent, name)
and not re.match(fixedEvent, optype)
):
continue
            # If we're not including runtime events, skip them.
if not fixedEvent and not runtimeEvents and optype == "runtime":
continue
# If we're skipping some number of events, skip them.
if skip > 0:
skip = skip - 1
continue
end = 0
if evtype == "X":
end = start + int(line["dur"])
events.append(Event(name, start, end, optype))
elif evtype == "B":
partialEvents[name] = Event(name, start, end, optype)
elif evtype == "E":
                if name not in partialEvents:
# This is a bug in Glow tracing, but ignore for now.
continue
ev = partialEvents[name]
ev.end = start
events.append(ev)
return events
def stackEvents(events):
"""Find all enclosed events and move them to be children. Returns a tree of Events
where parents completely enclose the timeline of their children."""
# Ensure events are sorted by time.
events = sorted(events, key=attrgetter("end"), reverse=True)
events = sorted(events, key=attrgetter("start"))
result = []
lastEvent = None
for ev in events:
# If ev is enclosed by the previous event, add it as a child.
if lastEvent:
if lastEvent.totalOverlap(ev):
lastEvent.addChild(ev)
continue
# If we're closing the previous event, recursively stack its children.
if lastEvent.children:
lastEvent.children = stackEvents(lastEvent.children)
lastEvent.updateChildTime()
        # If not enclosed, it's a new top-level event, which may enclose other events.
lastEvent = ev
result.append(ev)
# Stack children of the last Event.
if lastEvent.children:
lastEvent.children = stackEvents(lastEvent.children)
lastEvent.updateChildTime()
return result
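# Illustrative example with hypothetical events: given
#   outer = Event("outer", 0, 100, "runtime")
#   inner = Event("inner", 10, 30, "op")
# stackEvents([outer, inner]) returns [outer] with inner nested under
# outer.children, so outer.selfTime() becomes (100 - 0) - (30 - 10) = 80.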
def dumpAccumulate(events, keyfunc, traceTime):
"""Accumulate Event durations by a key produced by keyfunc. Keyfunc is a lambda which
takes an Event as a parameter."""
nameMap = defaultdict(list)
for ev in events:
name = keyfunc(ev)
nameMap[name].append(ev.selfTime())
layers = []
for (name, times) in nameMap.items():
layers.append(
(name, len(times), numpy.mean(times), numpy.std(times), numpy.sum(times))
)
# Iterate sorted by total time.
for (name, num, mean, stddev, total) in sorted(
layers, key=itemgetter(4), reverse=True
):
mean = formatUs(mean)
stddev = formatUs(stddev)
pc = (total / traceTime) * 100
total = formatUs(total)
print(
f"{name} {num} events, mean: {mean}, stddev: {stddev}, total: {total} ({pc:.2f}%)"
)
print()
print()
def main():
parser = argparse.ArgumentParser(description="process trace json")
parser.add_argument("filename", type=str, help="filename for trace file to load")
parser.add_argument(
"--layers", action="store_true", help="aggregate and display by layer names"
)
parser.add_argument(
"--kinds", action="store_true", help="aggregate and display by op kind"
)
parser.add_argument("--runtime", action="store_true", help="include runtime events")
parser.add_argument(
"--summarize", action="store_true", help="print a summary of the trace"
)
parser.add_argument(
"--event", type=str, default="", help="restrict events matching this regex"
)
parser.add_argument(
"--skip",
type=int,
default=0,
help="skip a number of events matching conditions",
)
args = parser.parse_args()
events = loadEvents(args.filename, args.runtime, args.event, args.skip)
if not events:
return
# Stack events so we can determine selfTime.
stacked = stackEvents(events)
# Ensure events are sorted by startTime.
stacked = sorted(stacked, key=attrgetter("start"))
totalTime = stacked[-1].end - stacked[0].start
coveredTime = 0
for ev in stacked:
coveredTime += ev.end - ev.start
if args.layers:
dumpAccumulate(events, lambda ev: f"{ev.name} ({ev.optype})", coveredTime)
if args.kinds:
dumpAccumulate(events, lambda ev: ev.optype, coveredTime)
if args.event:
dumpAccumulate(events, lambda ev: f"{ev.name} ({ev.optype})", coveredTime)
if args.summarize:
print("Total time of trace:", formatUs(totalTime))
print("Time covered by events:", formatUs(coveredTime))
print("Unattributed time:", formatUs(totalTime - coveredTime))
if __name__ == "__main__":
main()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This was mostly taken from a tutorial from Caffe2:
# caffe2/blob/master/caffe2/python/tutorials/py_gen/MNIST.py
# It currently allows training either LeNet or an MLP on MNIST. We can then
# load the pre-trained protobuf file into Glow to run.
import os
import shutil
import caffe2.python.predictor.predictor_exporter as pe
import numpy as np
from caffe2.python import brew, core, model_helper, optimizer, workspace
from caffe2.python.predictor import mobile_exporter
# If you would like to see some really detailed initializations,
# you can change --caffe2_log_level=0 to --caffe2_log_level=-1
core.GlobalInit(["caffe2", "--caffe2_log_level=0"])
print("Necessities imported!")
# If True, a more complicated convolutional model is used
# If False, a multilayer perceptron model is used
USE_LENET_MODEL = True
# This section preps your training and test sets in an lmdb database
def DownloadResource(url, path):
"""Downloads resources from s3 by url and unzips them to the provided path"""
    import zipfile
    from io import BytesIO
    import requests
    print("Downloading... {} to {}".format(url, path))
    r = requests.get(url, stream=True)
    z = zipfile.ZipFile(BytesIO(r.content))
z.extractall(path)
print("Completed download and extraction.")
current_folder = os.path.join(os.path.expanduser("~"), "caffe2_notebooks")
data_folder = os.path.join(current_folder, "tutorial_data", "mnist")
root_folder = os.path.join(current_folder, "tutorial_files", "tutorial_mnist")
db_missing = False
if not os.path.exists(data_folder):
os.makedirs(data_folder)
print("Your data folder was not found!! This was generated: {}".format(data_folder))
# Look for existing database: lmdb
# MNIST lmdb can be found here:
# https://download.caffe2.ai/databases/mnist-lmdb.zip
if os.path.exists(os.path.join(data_folder, "mnist-train-nchw-lmdb")):
print("lmdb train db found!")
else:
db_missing = True
if os.path.exists(os.path.join(data_folder, "mnist-test-nchw-lmdb")):
print("lmdb test db found!")
else:
db_missing = True
# attempt the download of the db if either was missing
if db_missing:
print("one or both of the MNIST lmbd dbs not found!!")
db_url = "http://download.caffe2.ai/databases/mnist-lmdb.zip"
try:
DownloadResource(db_url, data_folder)
except Exception as ex:
print(
"Failed to download dataset. Please download it manually from {}".format(
db_url
)
)
print(
"Unzip it and place the two database folders here: {}".format(data_folder)
)
raise ex
if os.path.exists(root_folder):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree(root_folder)
os.makedirs(root_folder)
workspace.ResetWorkspace(root_folder)
print("training data folder:" + data_folder)
print("workspace root folder:" + root_folder)
def AddInput(model, batch_size, db, db_type):
# load the data
data_uint8, label = brew.db_input(
model,
blobs_out=["data_uint8", "label"],
batch_size=batch_size,
db=db,
db_type=db_type,
)
print(data_uint8._from_net)
# cast the data to float
data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
# scale data from [0,255] down to [0,1]
data = model.Scale(data, data, scale=float(1.0 / 256))
# don't need the gradient for the backward pass
data = model.StopGradient(data, data)
return data, label
def AddMLPModel(model, data):
size = 28 * 28 * 1
sizes = [size, size * 2, size * 2, 10]
layer = data
for i in range(len(sizes) - 1):
layer = brew.fc(
model,
layer,
"dense_{}".format(i),
dim_in=sizes[i],
dim_out=sizes[i + 1],
use_cudnn=False,
)
layer = model.net.Relu(layer, "relu_{}".format(i), use_cudnn=False)
softmax = model.net.Softmax(layer, "softmax", use_cudnn=False)
return softmax
def AddLeNetModel(model, data):
"""
This part is the standard LeNet model: from data to the softmax prediction.
For each convolutional layer we specify dim_in - number of input channels
and dim_out - number or output channels. Also each Conv and MaxPool layer changes the
image size. For example, kernel of size 5 reduces each side of an image by 4.
While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides
each side in half.
"""
# Image size: 28 x 28 -> 24 x 24
conv1 = brew.conv(
model, data, "conv1", dim_in=1, dim_out=20, kernel=5, use_cudnn=False
)
# Image size: 24 x 24 -> 12 x 12
pool1 = model.net.MaxPool(conv1, "pool1", kernel=2, stride=2, use_cudnn=False)
# Image size: 12 x 12 -> 8 x 8
conv2 = brew.conv(
model, pool1, "conv2", dim_in=20, dim_out=50, kernel=5, use_cudnn=False
)
# Image size: 8 x 8 -> 4 x 4
pool2 = model.net.MaxPool(conv2, "pool2", kernel=2, stride=2, use_cudnn=False)
# 50 * 4 * 4 stands for dim_out from previous layer multiplied by the
# image size
fc3 = brew.fc(model, pool2, "fc3", dim_in=50 * 4 * 4, dim_out=500, use_cudnn=False)
fc3 = model.net.Relu(fc3, "relu3", use_cudnn=False)
pred = brew.fc(model, fc3, "pred", 500, 10, use_cudnn=False)
softmax = model.net.Softmax(pred, "softmax", use_cudnn=False)
return softmax
def AddModel(model, data):
if USE_LENET_MODEL:
return AddLeNetModel(model, data)
else:
return AddMLPModel(model, data)
def AddAccuracy(model, softmax, label):
"""Adds an accuracy op to the model"""
accuracy = model.Accuracy([softmax, label], "accuracy", use_cudnn=False)
return accuracy
def AddTrainingOperators(model, softmax, label):
"""Adds training operators to the model."""
xent = model.LabelCrossEntropy([softmax, label], "xent", use_cudnn=False)
# compute the expected loss
loss = model.AveragedLoss(xent, "loss", use_cudnn=False)
# track the accuracy of the model
AddAccuracy(model, softmax, label)
# use the average loss we just computed to add gradient operators to the
# model
model.AddGradientOperators([loss])
optimizer.build_sgd(
model, base_learning_rate=0.1, policy="step", stepsize=1, gamma=0.999
)
arg_scope = {"order": "NCHW"}
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
data, label = AddInput(
train_model,
batch_size=64,
db=os.path.join(data_folder, "mnist-train-nchw-lmdb"),
db_type="lmdb",
)
softmax = AddModel(train_model, data)
AddTrainingOperators(train_model, softmax, label)
test_model = model_helper.ModelHelper(
name="mnist_test", arg_scope=arg_scope, init_params=False
)
data, label = AddInput(
test_model,
batch_size=100,
db=os.path.join(data_folder, "mnist-test-nchw-lmdb"),
db_type="lmdb",
)
softmax = AddModel(test_model, data)
# Deployment model. We simply need the main AddModel part.
deploy_model = model_helper.ModelHelper(
name="mnist_deploy", arg_scope=arg_scope, init_params=False
)
AddModel(deploy_model, "data")
# The parameter initialization network only needs to be run once.
# Now all the parameter blobs are going to be initialized in the workspace.
workspace.RunNetOnce(train_model.param_init_net)
# overwrite=True allows you to run this cell several times and avoid errors
workspace.CreateNet(train_model.net, overwrite=True)
# Set the number of iterations and track the accuracy & loss
total_iters = 200
accuracy = np.zeros(total_iters)
loss = np.zeros(total_iters)
print("The blobs in the workspace pre-train: {}".format(workspace.Blobs()))
# Now, we will manually run the network for 200 iterations.
for i in range(total_iters):
workspace.RunNet(train_model.net)
accuracy[i] = workspace.blobs["accuracy"]
loss[i] = workspace.blobs["loss"]
print("The blobs in the workspace post-train: {}".format(workspace.Blobs()))
# param_init_net here will only create a data reader
# Other parameters won't be re-created because we selected
# init_params=False before
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
test_accuracy = np.zeros(100)
for i in range(100):
workspace.RunNet(test_model.net.Proto().name)
test_accuracy[i] = workspace.FetchBlob("accuracy")
print("test_accuracy: %f" % test_accuracy.mean())
# construct the model to be exported
# the inputs/outputs of the model are manually specified.
pe_meta = pe.PredictorExportMeta(
predict_net=deploy_model.net.Proto(),
parameters=[str(b) for b in deploy_model.params],
inputs=["data"],
outputs=["softmax"],
)
# save the model to a file. Use minidb as the file format
pe.save_to_db("minidb", os.path.join(root_folder, "mnist_model.minidb"), pe_meta)
print("The deploy model is saved to: " + root_folder + "/mnist_model.minidb")
workspace.RunNetOnce(deploy_model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, deploy_model.net, deploy_model.params
)
with open("init_net.pb", "wb") as f:
f.write(init_net.SerializeToString())
with open("predict_net.pb", "wb") as f:
f.write(predict_net.SerializeToString())
with open("predict_net.pbtxt", "wb") as f:
f.write(str(deploy_model.net.Proto()))
# Now we can load the model back and run the prediction to verify it works.
# we retrieve the last input data out and use it in our prediction test
# before we scratch the workspace
blob = workspace.FetchBlob("data")
# reset the workspace, to make sure the model is actually loaded
workspace.ResetWorkspace(root_folder)
# verify that all blobs are destroyed.
print("The blobs in the workspace after reset: {}".format(workspace.Blobs()))
# load the predict net
predict_net = pe.prepare_prediction_net(
os.path.join(root_folder, "mnist_model.minidb"), "minidb"
)
# verify that blobs are loaded back
print(
"The blobs in the workspace after loading the model: {}".format(workspace.Blobs())
)
# feed the previously saved data to the loaded model
workspace.FeedBlob("data", blob)
# predict
workspace.RunNetOnce(predict_net)
softmax = workspace.FetchBlob("softmax")
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import argparse
import array
import collections
import gzip
import os.path
import pickle
import sys
import tarfile
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
Dataset = collections.namedtuple("TargetItem", "filename, url, handler, dest_path")
# Load a file using the pickle module; the parameters vary based on the
# Python version.
def pickle_load(file):
if sys.version_info.major >= 3:
return pickle.load(file, encoding="bytes")
return pickle.load(file)
# A helper function to extract the MNIST dataset from the gzipped pickle file
# and split the dataset into data and labels.
def handle_mnist(filename, dest_path):
print("Extracting {} ...".format(filename))
with gzip.open(filename, "rb") as file:
training_set, _, _ = pickle_load(file)
data, labels = training_set
images_file = open(os.path.join(dest_path, "mnist_images.bin"), "wb")
data.tofile(images_file)
images_file.close()
labels_file = open(os.path.join(dest_path, "mnist_labels.bin"), "wb")
L = array.array("B", labels)
L.tofile(labels_file)
labels_file.close()
def untar(filename, dest_path, member=None):
print("Extracting {} ...".format(filename))
tar = tarfile.open(filename, "r:gz")
if not member:
tar.extractall(dest_path)
else:
tar.extract(member, dest_path)
tar.close()
DATASETS = dict(
mnist=Dataset(
"mnist.pkl.gz",
"http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz",
handle_mnist,
".",
),
cifar10=Dataset(
"cifar-10.binary.tar.gz",
"http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
untar,
".",
),
ptb=Dataset(
"ptb.tgz",
"http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz",
untar,
"ptb",
),
fr2en=Dataset(
"fr2en.tar.gz",
"http://fb-glow-assets.s3.amazonaws.com/models/fr2en.tar.gz",
untar,
"fr2en",
),
)
DATASET_NAMES = list(DATASETS.keys())
CAFFE2_MODELS = [
"densenet121",
"inception_v1",
"inception_v2",
"lenet_mnist",
"resnet50",
"shufflenet",
"squeezenet",
"vgg19",
"zfnet512",
"bvlc_alexnet",
"en2gr",
"quant_resnet50",
]
ONNX_MODELS = [
"resnet50",
"vgg19",
"squeezenet",
"zfnet512",
"densenet121",
"shufflenet",
"inception_v1",
"inception_v2",
"bvlc_alexnet",
"lenet_mnist",
"googlenet_v1_slim",
"googlenet_v4_slim",
"resnet50_tf",
"emotion_ferplus",
"bvlc_reference_rcnn_ilsvrc13",
]
def report_download_progress(chunk_number, chunk_size, file_size):
if file_size != -1:
percent = min(1, (chunk_number * chunk_size) / file_size)
bar = "#" * int(64 * percent)
sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100)))
def download(path, filename, url):
if not os.path.exists(path):
os.mkdir(path)
destFile = os.path.join(path, filename)
if os.path.exists(destFile):
print("{} already exists, skipping ...".format(filename))
else:
print("Downloading {} from {} ...".format(filename, url))
try:
urlretrieve(url, destFile, reporthook=report_download_progress)
except URLError:
print("Error downloading {}!".format(filename))
finally:
# Just a newline.
print()
def download_caffe2_models(outDir, models):
for modelname in models:
print("For model ", modelname)
for filename in ["predict_net.pbtxt", "predict_net.pb", "init_net.pb"]:
path = os.path.join(outDir, modelname)
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}/{}".format(
modelname, filename
)
download(path, filename, url)
if modelname == "en2gr":
for filename in ["dst_dictionary.txt", "src_dictionary.txt"]:
path = os.path.join(outDir, "en2gr")
url = "http://fb-glow-assets.s3.amazonaws.com/models/en2gr/{}".format(
filename
)
download(path, filename, url)
return
def download_onnx_models(outDir, models):
for modelname in models:
if modelname in [
"resnet50",
"vgg19",
"squeezenet",
"zfnet512",
"densenet121",
"shufflenet",
]:
url = "https://s3.amazonaws.com/download.onnx/models/opset_6/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["inception_v1", "inception_v2", "bvlc_alexnet"]:
url = "https://s3.amazonaws.com/download.onnx/models/opset_8/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["lenet_mnist"]:
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["googlenet_v1_slim", "googlenet_v4_slim", "resnet50_tf"]:
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}.onnx".format(
modelname
)
filename = "{}.onnx".format(modelname)
path = os.path.join(outDir, modelname)
download(path, filename, url)
elif modelname == "emotion_ferplus":
url = "https://onnxzoo.blob.core.windows.net/models/opset_8/emotion_ferplus/emotion_ferplus.tar.gz"
filename = "emotion_ferplus.tar.gz"
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir, "emotion_ferplus/model.onnx")
elif modelname == "bvlc_reference_rcnn_ilsvrc13":
url = "https://s3.amazonaws.com/download.onnx/models/opset_8/bvlc_reference_rcnn_ilsvrc13.tar.gz"
filename = "bvlc_reference_rcnn_ilsvrc13.tar.gz"
download(outDir, filename, url)
untar(
os.path.join(outDir, filename),
outDir,
"bvlc_reference_rcnn_ilsvrc13/model.onnx",
)
return
def parse():
parser = argparse.ArgumentParser(description="Download datasets for Glow")
parser.add_argument("-d", "--datasets", nargs="+", choices=DATASET_NAMES)
parser.add_argument("-D", "--all-datasets", action="store_true")
parser.add_argument("-c", "--caffe2-models", nargs="+", choices=CAFFE2_MODELS)
parser.add_argument("-C", "--all-caffe2-models", action="store_true")
parser.add_argument("-o", "--onnx-models", nargs="+", choices=ONNX_MODELS)
parser.add_argument("-O", "--all-onnx-models", action="store_true")
parser.add_argument("-P", "--output-directory", default=".")
options = parser.parse_args()
if options.all_datasets:
datasets = DATASET_NAMES
elif options.datasets:
datasets = options.datasets
else:
datasets = []
if options.all_caffe2_models:
caffe2Models = CAFFE2_MODELS
elif options.caffe2_models:
caffe2Models = options.caffe2_models
else:
caffe2Models = []
if options.all_onnx_models:
onnxModels = ONNX_MODELS
elif options.onnx_models:
onnxModels = options.onnx_models
else:
onnxModels = []
return options.output_directory, datasets, caffe2Models, onnxModels
def main():
outDir, datasets, caffe2Models, onnxModels = parse()
if not os.path.exists(outDir):
os.mkdir(outDir)
outDir = os.path.join(".", outDir)
try:
for name in datasets:
dataset = DATASETS[name]
download(outDir, dataset.filename, dataset.url)
dataset.handler(
os.path.join(outDir, dataset.filename),
os.path.join(outDir, dataset.dest_path),
)
if datasets:
print("\n===Done with downloading datasets.\n\n")
if caffe2Models:
download_caffe2_models(outDir, caffe2Models)
print("===Done with downloading caffe2 models.\n\n")
if onnxModels:
download_onnx_models(outDir, onnxModels)
print("===Done with downloading onnx models.\n\n")
except KeyboardInterrupt:
print("Interrupted")
if __name__ == "__main__":
main()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load a pre-trained Caffe2 image classifier and run it on an image.
import argparse
import collections
import os
import time
import numpy as np
import skimage.io
from caffe2.python import workspace
print("Required modules imported.")
cmd_line_parser = argparse.ArgumentParser(
description="Run Caffe2 using provided models and inputs."
)
cmd_line_parser.add_argument(
"--image", "-i", required=True, help="Image to be processed by the neural network"
)
cmd_line_parser.add_argument(
"--directory",
"-d",
required=True,
help="Directory containing the network structure "
"<predict_net.pb> and weight <init_net.pb> files. "
"The model name is assumed to be the directory "
"name, and should correspond to a model from the "
"model_props (e.g. 'resnet50', 'lenet_mnist', "
"etc.). If the directory name is not the model "
"name, use --model-name (-m) to specify the name "
"of the supported model to use.",
)
cmd_line_parser.add_argument(
"--model-name", "-m", required=False, help="Name of the model to be used"
)
cmd_line_parser.add_argument(
"--image_mode",
required=False,
help="Image mode; one of '0to1', '0to256', or '128to127'",
)
cmd_line_parser.add_argument("--time", action="store_true")
cmd_line_parser.add_argument("--iterations", type=int, default=1)
args = cmd_line_parser.parse_args()
# 0to256 is the default input
def mode_0to256(x):
return x
def mode_0to1(x):
return x / 255
def mode_128to127(x):
return x - 128
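# Illustrative mapping of a raw pixel value of 255 under each mode:
# mode_0to256(255) -> 255, mode_0to1(255) -> 1.0, mode_128to127(255) -> 127.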
Model = collections.namedtuple(
"Model", "blob_name, image_mode_op, image_size, num_color_channels"
)
model_props = dict(
densenet121=Model("data", mode_0to1, 224, 3),
inception_v1=Model("data", mode_128to127, 224, 3),
inception_v2=Model("data", mode_128to127, 224, 3), # unknown
resnet50=Model("gpu_0/data", mode_0to1, 224, 3),
shufflenet=Model("gpu_0/data", mode_0to1, 224, 3),
squeezenet=Model("data", mode_128to127, 224, 3),
vgg19=Model("data", mode_128to127, 224, 3),
zfnet512=Model("gpu_0/data", mode_0to256, 224, 3),
lenet_mnist=Model("data", mode_0to1, 28, 1),
resnext=Model("data", mode_0to1, 224, 3),
)
MODEL = args.model_name
if MODEL is None:
MODEL = os.path.basename(os.path.normpath(args.directory))
if MODEL not in list(model_props.keys()):
print(
"Model " + MODEL + " is not supported. Specify --model-name (-m) if "
"it is not the base name of the directory containing pb files."
)
exit(1)
MODEL_ROOT = args.directory
IMAGE_LOCATION = args.image
img = skimage.img_as_ubyte(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)
image_shape = np.array(img).shape
print("Initial img shape: " + str(image_shape))
if img.shape[0] != img.shape[1] or img.shape[0] != model_props[MODEL].image_size:
print("Invalid image dimensions for model.")
exit(2)
num_dims = len(np.array(img).shape)
if num_dims != 3:
img = np.expand_dims(img, axis=num_dims)
img = img[:, :, : model_props[MODEL].num_color_channels]
# Create a zero initiated image.
transposed_image = np.zeros(
(
1,
model_props[MODEL].num_color_channels,
model_props[MODEL].image_size,
model_props[MODEL].image_size,
)
).astype(np.float32)
for w in range(0, model_props[MODEL].image_size):
for h in range(0, model_props[MODEL].image_size):
for c in range(0, model_props[MODEL].num_color_channels):
# WHC -> CWH, RGB -> BGR
transposed_image[0][model_props[MODEL].num_color_channels - c - 1][w][
h
] = model_props[MODEL].image_mode_op(img[w][h][c])
final_image = transposed_image
print("Shape of final_image: " + str(np.array(final_image).shape))
with open(MODEL_ROOT + "/init_net.pb", "rb") as f:
init_net = f.read()
with open(MODEL_ROOT + "/predict_net.pb", "rb") as f:
predict_net = f.read()
workspace.ResetWorkspace()
blob_name = model_props[MODEL].blob_name
workspace.FeedBlob(blob_name, final_image)
print("The blobs in the workspace after FeedBlob: {}".format(workspace.Blobs()))
# Create a predictor using the loaded model.
p = workspace.Predictor(init_net, predict_net)
start = time.time()
for i in range(0, args.iterations):
results = p.run([final_image])
end = time.time()
if args.time:
print(
"Wall time per iteration (s): {:0.4f}".format((end - start) / args.iterations)
)
max_idx = np.argmax(results[0][0])
sum_probability = sum(results[0][0])
print("Max index is {}".format(max_idx))
print(
"Predicted class at index {} with probability {}".format(
max_idx, results[0][0][max_idx]
)
)
print("Number of classes {}".format(len(results[0][0])))
print("Sum of probabilities is {}".format(sum_probability))
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
# imagenet-process: runs preprocessing of standard ImageNet images so they
# work with a pretrained model (e.g. resnet) through Glow.
# usage: python3 imagenet-process.py "images/*.JPEG" processed
import PIL.Image
import torchvision
parser = argparse.ArgumentParser(description="imagenet preprocessor")
parser.add_argument("input", metavar="input", help="glob to input images")
parser.add_argument(
"output", metavar="output", default="./", help="directory to put output images"
)
parser.add_argument("--normalize", action="store_true")
args = parser.parse_args()
# create the output dir if necessary
try:
os.makedirs(args.output, exist_ok=True)
except Exception as e:
print(e)
for ifn in glob.glob(args.input):
name, ext = os.path.splitext(ifn)
name = os.path.basename(name)
outputname = os.path.join(args.output, name + ".png")
print("processing", name, "as", outputname)
im = PIL.Image.open(ifn)
im = im.convert("RGB")
resize = torchvision.transforms.Compose(
[torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
)
processed_im = resize(im)
if args.normalize:
transform_fn = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
else:
transform_fn = torchvision.transforms.ToTensor()
processed_im = transform_fn(processed_im)
processed_im = processed_im.unsqueeze(0)
torchvision.utils.save_image(processed_im, outputname)
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import sys
import tempfile
import pexpect
import PIL.Image as Image
import torchvision
parser = argparse.ArgumentParser(
description="Glow image-classifier Driver for " "TopK ImageNet Calculation"
)
parser.add_argument(
"--validation-images-dir",
metavar="DIR",
required=True,
help="Path to the directory containing the validation set "
"of images. Subdirectories are expected to be organized "
"such that when sorted their index corresponds to their "
"label. For example, if the validation_images_dir contains "
"{'abc/', 'def/', 'ghi/'}, then this should correspond to "
"labels {0, 1, 2} respectively.",
)
parser.add_argument(
"--batch-size",
default=1,
type=int,
metavar="N",
help="Batch size for use with the model. The total number "
"of images in the validation_images_dir should be "
"divisible by the batch size.",
)
parser.add_argument(
"--only-resize-and-save",
default=False,
action="store_true",
help="Use to pre-process images "
"to 224x224. Saves the images to "
"the validation_images_dir/processed/",
)
parser.add_argument(
"--resize-input-images",
default=False,
action="store_true",
help="Resize and center-crop images " "at runtime to 224x224.",
)
parser.add_argument(
"--verbose", default=False, action="store_true", help="Verbose printing."
)
parser.add_argument(
"--image-classifier-cmd",
default="",
help="Command to use for running the image-classifier, "
"including the binary and all of its command lime "
"parameters.",
)
# Opens and returns an image located at @param path using the PIL loader.
def pil_loader(path):
# open path as file to avoid ResourceWarning
# (https://github.com/python-pillow/Pillow/issues/835)
with open(path, "rb") as img:
img = Image.open(img)
return img.convert("RGB")
# Opens and returns an image located at @param path using the accimage loader.
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
# Opens and returns an image located at @param path using either the accimage
# loader or PIL loader.
def default_image_loader(path):
if torchvision.get_image_backend() == "accimage":
return accimage_loader(path)
return pil_loader(path)
def get_sorted_img_subdirs(validation_images_dir):
img_dir_paths = []
for img_dir in os.listdir(validation_images_dir):
dir_path = os.path.join(validation_images_dir, img_dir)
if os.path.isdir(dir_path):
img_dir_paths.append(img_dir)
img_dir_paths.sort()
return img_dir_paths
# @returns two lists of the same length for the images found in directory
# @param validation_images_dir; the first list contains paths to all images
# found, and the second list contains the corresponding label of each image.
def get_img_paths_and_labels(validation_images_dir):
img_subdirs = get_sorted_img_subdirs(validation_images_dir)
# Create lists holding paths to each image to be classified and the label
# for that image.
img_paths = []
img_labels = []
curr_label_idx = 0
for img_subdir in img_subdirs:
img_subdir_path = os.path.join(validation_images_dir, img_subdir)
for img in os.listdir(img_subdir_path):
full_img_path = os.path.join(img_subdir_path, img)
if os.path.isfile(full_img_path):
img_paths.append(full_img_path)
img_labels.append(curr_label_idx)
curr_label_idx = curr_label_idx + 1
return img_paths, img_labels
# Given an image located at @param img_path, transform the image
# and save it to the path @param path_to_new_img.
def resize_and_save_image(img_path, path_to_new_img):
# Load the image.
img = default_image_loader(img_path)
    # Resize and center-crop the images to 224x224.
transform_resize = torchvision.transforms.Compose(
[torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
)
resized_img = transform_resize(img)
resized_img.save(path_to_new_img, format="png")
# Used to pre-process an input set of images. Takes a string of a directory
# @param validation_images_dir and saves the cropped subset of the images in a
# subdirectory `processed/`, which must not yet exist.
def save_centered_cropped_dataset(validation_images_dir):
processed_validation_images_dir = os.path.join(validation_images_dir, "processed")
print(
"Saving centered cropped input images: %s" % (processed_validation_images_dir)
)
img_subdirs = get_sorted_img_subdirs(validation_images_dir)
try:
os.makedirs(processed_validation_images_dir)
except OSError:
sys.exit("New validation directory must not exist")
# Iterate over all labels subdirectories, loading, transforming and saving
# all images to the new location.
for img_subdir in img_subdirs:
orig_img_subdir_path = os.path.join(validation_images_dir, img_subdir)
processed_img_subdir_path = os.path.join(
processed_validation_images_dir, img_subdir
)
# Create a new subdirectory for the next label.
try:
os.makedirs(processed_img_subdir_path)
except OSError:
sys.exit("New label subdirectory somehow already existed.")
# Transform and save all images in this label subdirectory.
for orig_img_filename in os.listdir(orig_img_subdir_path):
orig_img_path = os.path.join(orig_img_subdir_path, orig_img_filename)
if os.path.isfile(orig_img_path):
processed_img_path = os.path.join(
processed_img_subdir_path, orig_img_filename
)
resize_and_save_image(orig_img_path, processed_img_path)
# @returns a list of strings (of length equal to the @param batch_size) which
# are paths to images to do inference on. @param img_paths is the set of all
# image paths, @param img_index is the next index to use in @param img_paths,
# and @param tmp_dir_name is the location of where to save the images if
# @param resize_input_images is true. Note that if @param resize_input_images is
# true, then names for the temporary images are used for every batch, thus only
# @param batch_size temporary images will ever exist in @param tmp_dir_name.
def get_curr_img_paths(
img_paths, img_index, batch_size, tmp_dir_name, resize_input_images
):
curr_img_paths = []
for batch_idx in range(batch_size):
img_path = img_paths[img_index + batch_idx]
# If we are resizing the image then we are going to save it to a
# temp location to read in later for inference.
if resize_input_images:
# Save the new image to the tmp directory. Note that these names are
# reused every call to get_curr_img_paths().
path_to_tmp_img = os.path.join(
tmp_dir_name, "tmp" + str(batch_idx) + ".png"
)
resize_and_save_image(img_path, path_to_tmp_img)
img_path = path_to_tmp_img
curr_img_paths.append(img_path)
return curr_img_paths
# Verifies that the @param image_classifier_cmd is well formatted via
# assertions.
def verify_spawn_cmd(image_classifier_cmd):
split_cmd = image_classifier_cmd.split()
if "image-classifier" in split_cmd[0]:
assert "-" in split_cmd, "Streaming mode must be used."
assert "-topk=5" in split_cmd, "-topk=5 must be used."
assert any(
"-model-input-name=" in s for s in split_cmd
), "image-classifier requires -model-input-name to be specified."
assert any(
"-m=" in s for s in split_cmd
), "image-classifier requires -m to be specified"
assert any(
"-image-mode=" in s for s in split_cmd
), "image-classifier requires -image-mode to be specified"
# Prints the Top-1 and Top-5 accuracy given @param total_image_count, @param
# top1_count, and @param top5_count.
def print_topk_accuracy(total_image_count, top1_count, top5_count):
top1_accuracy = float(top1_count) / float(total_image_count)
top5_accuracy = float(top5_count) / float(total_image_count)
print("\tTop-1 accuracy: " + "{0:.4f}".format(top1_accuracy))
print("\tTop-5 accuracy: " + "{0:.4f}".format(top5_accuracy))
# Calculates and prints top-1 and top-5 accuracy for images located in
# subdirectories at @param validation_images_dir, given the command line
# parameters passed in to @param args.
def calculate_top_k(
validation_images_dir,
image_classifier_cmd,
batch_size,
resize_input_images,
verbose,
):
print("Calculating Top-1 and Top-5 accuracy...")
verify_spawn_cmd(image_classifier_cmd)
img_paths, img_labels = get_img_paths_and_labels(validation_images_dir)
total_image_count = len(img_paths)
assert (
total_image_count % batch_size == 0
), "Total number of images must be divisible by batch size"
if verbose:
print("Running image classifier with: " + image_classifier_cmd)
try:
# Create a temporary directory to store the transformed image we
        # classify (if applicable) and the log of image-classifier output.
tmp_dir_name = tempfile.mkdtemp()
path_to_tmp_log = os.path.join(tmp_dir_name, "log.txt")
with open(path_to_tmp_log, "w") as fout:
classifier_proc = pexpect.spawn(
image_classifier_cmd, logfile=fout, timeout=None
)
if verbose:
print("Temp log located at: " + path_to_tmp_log)
prompt = "Enter image filenames to classify: "
top1_count = 0
top5_count = 0
# Process the images in batches as specified on the command line.
for img_index in range(0, total_image_count, batch_size):
curr_img_paths = get_curr_img_paths(
img_paths, img_index, batch_size, tmp_dir_name, resize_input_images
)
# Expect prompt from the image-classifier for the next image path.
classifier_proc.expect(prompt)
appended_paths = " ".join(curr_img_paths)
assert (
len(appended_paths) <= 1024
), "Line length is too long (max 1024): %r" % len(appended_paths)
# Send the paths to the image-classifier.
classifier_proc.sendline(appended_paths)
for batch_idx in range(batch_size):
# Now we expect the image-classifier's response with the label.
# The first line will include the path to the file, e.g.:
# File: tests/images/imagenet/cat_285.png
classifier_proc.expect(" File: " + curr_img_paths[batch_idx])
# All labels will be formatted like:
# Label-K1: 281 (probability: 0.7190)
top5_labels = []
for _ in range(5):
label_and_prob = classifier_proc.readline()
# Get the label from the line.
label = label_and_prob.split()[1]
top5_labels.append(int(label))
expected_label = img_labels[img_index + batch_idx]
if expected_label == top5_labels[0]:
top1_count += 1
if expected_label in top5_labels:
top5_count += 1
curr_completed_count = img_index + batch_size
if curr_completed_count % 100 == 0:
                    print(
                        "Finished image index %d out of %d"
                        % (curr_completed_count, total_image_count)
                    )
if verbose:
print(" Current Top-1/5 accuracy:")
print_topk_accuracy(
curr_completed_count, top1_count, top5_count
)
else:
print("")
finally:
classifier_proc.close(force=True)
# Remove the temp directory we used to save the images and log.
shutil.rmtree(tmp_dir_name)
print(
"\nCompleted running; Final Top-1/5 accuracy across %d images:"
% (total_image_count)
)
print_topk_accuracy(total_image_count, top1_count, top5_count)
def main():
# Parse the recognized command line arguments into args.
args = parser.parse_args()
# Path to the directory containing the validation set of images.
# Subdirectories are expected to be organized such that when sorted their
# index corresponds to their label. For example, if the
# validation_images_dir contains {'abc/', 'def/', 'ghi/'}, then this should
# correspond to labels {0, 1, 2} respectively.
validation_images_dir = os.path.join(args.validation_images_dir)
assert os.path.exists(validation_images_dir), (
"Validation directory does not exist: " + validation_images_dir
)
# This is used solely to pre-process the input image set.
if args.only_resize_and_save:
save_centered_cropped_dataset(validation_images_dir)
return
calculate_top_k(
validation_images_dir,
args.image_classifier_cmd,
args.batch_size,
args.resize_input_images,
args.verbose,
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import sqlite3
import typing
from typing import Dict, List, Tuple
# Mapping from node name to Node for all nodes currently in the graph.
NODES_MAP: Dict[str, "Node"] = {}
# Scope stack
SCOPE_STACK: List[str] = []
# Scope related information
scopeID = 0
class NodeNameAndKind(typing.NamedTuple):
"""A class that represents the named tuple of node name and kind."""
name: str
kind: str
class NodeValue(typing.NamedTuple):
"""A class that represents the named tuple of node and result number."""
node: "Node"
resNo: int
class Node:
"""A class that represents a node in the compute graph
Public attributes:
kindName_: str. The kind name.
name_: str. The node name.
inputs_: List[NodeValue]. Input node values.
users_: Dict['Node', int]. The users of this node.
"""
def __init__(self, kindName: str, name: str):
self.kindName_: str = kindName
self.name_: str = name
self.inputs_: List[NodeValue] = []
self.users_: Dict["Node", int] = {}
def __repr__(self):
return self.name_
def get_kind_name(self) -> str:
"""Gets the kind name."""
return self.kindName_
def get_name(self) -> str:
"""Gets the node name."""
return self.name_
def getNodeNameAndKind(self) -> NodeNameAndKind:
"""Gets the Name+Kind tuple."""
return (self.name_, self.kindName_)
def get_inputs(self) -> List[NodeValue]:
"""Gets the input node."""
return self.inputs_
def get_users(self) -> Dict["Node", int]:
"""Gets the user of this node."""
return self.users_
def add_user(self, u: "Node") -> None:
"""Adds one user of this node. Increment the number of uses of the user by 1."""
if u not in self.users_:
self.users_[u] = 0
self.users_[u] += 1
def remove_user(self, u: "Node") -> None:
"""Removes one use from the given user."""
if u in self.users_:
self.users_[u] -= 1
if self.users_[u] == 0:
del self.users_[u]
def has_no_uses(self) -> bool:
"""Returns True if the node has no uses."""
return len(self.users_) == 0
def set_input(self, nodeVal: NodeValue) -> None:
"""Adds one input node value."""
self.inputs_.append(nodeVal)
def replace_input(self, oldNodeVal: NodeValue, newNodeVal: NodeValue) -> None:
"""Replace one operand with another one.
Args:
oldNode: Node. Old operand node.
oldResNo: int. Old operand result number.
newNode: Node. New operand node.
newResNo: int. New operand result number.
"""
try:
self.inputs_.remove(oldNodeVal)
except ValueError:
print("Removed input value must already exist in the node's input list. ")
self.inputs_.append(newNodeVal)
def set_scope_of_creation(self, creationScopeName: str) -> None:
self.creationScopeName_ = creationScopeName
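# Minimal usage sketch of the Node API above (node names are illustrative):
#   a = Node("Constant", "weights")
#   b = Node("MatMul", "fc")
#   b.set_input(NodeValue(a, 0))
#   a.add_user(b)        # a now has one use
#   a.remove_user(b)     # a.has_no_uses() is True again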
class DottyPrinter:
"""A class for generating the dotty graph file
Public attributes:
vertices_: List[str]. Vertices in the dotty file.
edges_: List[str]. Edges in the dotty file.
uniqueVertexMap_: Dict[Node, int]. A map for node with their unique index.
        uniqueVertexNo_: int. An incrementing counter used to assign unique indices to nodes.
        colors_: List[str]. A list of colors for nodes in the dotty graph.
"""
def __init__(self, nodesMap: Dict[NodeNameAndKind, Node]):
self.nodesMap_ = nodesMap
self.vertices_: List[str] = []
self.edges_: List[str] = []
self.uniqueVertexMap_: Dict[Node, int] = {}
self.uniqueVertexNo_: int = 0
self.colors_: List[str] = [
"AliceBlue",
"CadetBlue1",
"Coral",
"DarkOliveGreen1",
"DarkSeaGreen1",
"GhostWhite",
"Khaki1",
"LavenderBlush1",
"LemonChiffon1",
"LightSkyBlue",
"MistyRose1",
"MistyRose2",
"PaleTurquoise2",
"PeachPuff1",
"PowderBlue",
"Salmon",
"Thistle1",
"Thistle3",
"Wheat1",
"Yellow2",
]
def get_unique_vertex_name(self, node: Node) -> str:
"""Get the unique vertex name given a Node object."""
if node not in self.uniqueVertexMap_:
self.uniqueVertexMap_[node] = self.uniqueVertexNo_
self.uniqueVertexNo_ += 1
return f"v{self.uniqueVertexMap_[node]}"
def dump_label(self, node: Node) -> str:
"""Returns the string for the label of the given node."""
labelStr = f"""{{ {{<Inputs>Inputs}}|
{{ {node.get_kind_name()}\lname: {node.get_name()} }}|
{{<Outputs>Outputs}} }}"""
return labelStr
def get_color(self, node: Node) -> str:
"""Returns the color for the given node."""
idx = hash(node.get_kind_name()) % len(self.colors_)
return self.colors_[idx]
def dump_node(self, node: Node) -> None:
"""Generates the dotty information for the given node."""
if not node:
return
nodeStr = f"""{self.get_unique_vertex_name(node)}[\n
\tlabel = \"{self.dump_label(node)}\"\n
\tshape = \"record\"\n
\tstyle=\"filled,rounded\"\n
\tfillcolor={self.get_color(node)}\n
penwidth = 2];\n"""
self.vertices_.append(nodeStr)
def visitNodes(self) -> None:
"""Visits all nodes in nodesMap_ and dump the dotty information for each node."""
for node in self.nodesMap_.values():
self.dump_node(node)
def visitEdges(self) -> None:
"""Visits all edges and dump the dotty information for each edge."""
for node in self.nodesMap_.values():
for nodeInput in node.get_inputs():
i = nodeInput[0]
if i.get_name() not in self.nodesMap_:
print(i.get_kind_name(), i.get_name())
edgeStr = self.get_unique_vertex_name(i) + ":Outputs -> "
edgeStr += self.get_unique_vertex_name(node) + ":Inputs"
self.edges_.append(edgeStr)
def dump_graph(self, dagName: str) -> None:
"""Visits the node graph and generates the dotty information."""
self.visitNodes()
self.visitEdges()
with open(f"{dagName}_dotty.dot", "w") as f:
f.write("digraph DAG {\n\trankdir=TB;\n")
for v in self.vertices_:
f.write(f"{v}\n")
for e in self.edges_:
f.write(f"{e};\n")
f.write("}")
def parse_args() -> Tuple[str, str, List[str]]:
"""Parse the arguments of this script."""
parser = argparse.ArgumentParser(description="Parse compilation log")
parser.add_argument("-f", "--log-file")
parser.add_argument("-d", "--db-file")
parser.add_argument("--dump-phases", nargs="+")
options = parser.parse_args()
if options.dump_phases:
dumpPhases = options.dump_phases
else:
dumpPhases = []
if options.db_file:
dbFile = options.db_file
else:
dbFile = "compilation_log_db.sqlite"
return dbFile, options.log_file, dumpPhases
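# Example invocation (file names and the script name are illustrative):
#   python <this_script>.py -f compilation.log -d glow_log.sqlite --dump-phases optimizeFunction 12
# where -f is the JSON compilation log; --dump-phases accepts scope names and/or
# numeric phase IDs at which to dump the DAG.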
def dump_dag(dagName: str) -> None:
"""A helper function to dump the DAG."""
dotty = DottyPrinter(NODES_MAP)
dotty.dump_graph(dagName)
def store_transformation_into_DB(
transID: int,
baseNode: Node,
addedNodes: List[Node],
replacedNodes: List[Node],
cursor: sqlite3.Cursor,
fullScopeName: str,
) -> None:
"""A helper function to store nodes transformations into database.
Args:
transID: int. The ID for this stored transformation.
baseNode: Node. The base node that changes its operands.
addedNodes: List[Node]. A list of added nodes in this transformation.
replacedNodes: List[Node]. A list of replaced nodes in this transformation.
cursor: sqlite3.Cursor. Cursor of the sqlite3 database.
fullScopeName: str. The full scope name of this transformation.
"""
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'OPERATOR_BASE',
?,
?,
?
)""",
(transID, baseNode.get_name(), baseNode.get_kind_name(), fullScopeName),
)
for an in addedNodes:
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'ADD_OPERAND',
?,
?,
?
)""",
(transID, an.get_name(), an.get_kind_name(), fullScopeName),
)
for rn in replacedNodes:
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'REMOVE_OPERAND',
?,
?,
?
)""",
(transID, rn.get_name(), rn.get_kind_name(), fullScopeName),
)
def find_all_replaced_nodes(replacedNode: Node) -> List[Node]:
"""Find all nodes that will lose user after the given node is removed.
After one node lost all its uses (e.g. after replaceAllUsesOfWith()), we go through
all of its parents to collect all nodes that will consequently lose all their uses.
Args:
replacedNode: Node. The node that just lost all uses.
"""
replacedNodeList = []
activeDCEList = [replacedNode]
while len(activeDCEList):
DCEnode = activeDCEList.pop()
replacedNodeList.append(DCEnode)
for nv in DCEnode.inputs_:
n = nv.node
if len(n.users_) <= 1:
activeDCEList.append(n)
return replacedNodeList
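# Illustrative DCE walk: if C was the only user of B, and B the only user of A,
# then after C loses all of its uses find_all_replaced_nodes(C) returns [C, B, A],
# since removing each node leaves its inputs with no remaining users.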
def init_db(sqliteFile: str) -> sqlite3.Connection:
"""Initialize a sqlite3 database connection."""
if os.path.isfile(sqliteFile):
os.remove(sqliteFile)
# Connect to database file.
conn = sqlite3.connect(sqliteFile)
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE Log_Transformation (
trans_id INTEGER,
operation_type VARCHAR(200),
node_name VARCHAR(200),
node_kind VARCHAR(200),
full_scope VARCHAR(200)
)"""
)
cursor.execute(
"""CREATE TABLE Log_Scope (
scope_id INTEGER,
scope_str VARCHAR(200),
full_scope_str VARCHAR(200)
)"""
)
cursor.execute(
"""CREATE TABLE Log_Node (
node_name VARCHAR(200),
node_kind VARCHAR(200),
create_scope_id INTEGER,
delete_scope_id INTEGER
)"""
)
cursor.execute(
"""CREATE TABLE Log_Node_Operation (
scope_id INTEGER,
operation VARCHAR(200),
node_name VARCHAR(200),
node_kind VARCHAR(200)
)"""
)
return conn
def process(log: Dict, dumpPhases: List[str], conn: sqlite3.Connection) -> None:
"""Process all the log lines.
    Extract their information, reconstruct the node graph, and dump DAGs at the given compilation phases.
    Args:
        log: Dict. The parsed JSON compilation log.
        dumpPhases: List[str]. The phases at which to dump the DAG.
        conn: sqlite3.Connection. The connection to a sqlite3 database that will store all the transformations in the compilation log.
"""
# DB related vars
cursor = conn.cursor()
# Record nodes transformation
replacedNodes: List[Node] = []
addedNodes: List[Node] = []
recordTransformation = False
    stopRecordTransformationNames = {
        "optimizeFunctionBeforeLowering",
        "optimizeFunction",
    }
transID = 0
def process_create(event: Dict) -> None:
global scopeID
createdNode = Node(event["kind"], event["create"])
createdNode.set_scope_of_creation(SCOPE_STACK[-1])
NODES_MAP[createdNode.get_name()] = createdNode
cursor.execute(
"""INSERT INTO Log_Node VALUES (
?,
?,
?,
?
)""",
(event["create"], event["kind"], scopeID, -1),
)
cursor.execute(
"""INSERT INTO Log_Node_Operation VALUES (
?,
'CREATE',
?,
?
)""",
(scopeID, event["create"], event["kind"]),
)
if len(event["inputs"]) == 0:
# there's no node input for Splat
assert event["kind"] in (
"Splat",
"Constant",
"Placeholder",
), "This node kind shouldn't have any inputs."
for i in event["inputs"]:
name, resNo = i.split(":", 1)
if name in NODES_MAP:
inputNode = NODES_MAP[name]
createdNode.set_input(NodeValue(inputNode, resNo))
inputNode.add_user(createdNode)
if recordTransformation:
addedNodes.append(createdNode)
def process_delete(event: Dict) -> None:
global scopeID
deletedNode = NODES_MAP[event["delete"]]
for inputNode in deletedNode.inputs_:
i = inputNode[0]
i.remove_user(deletedNode)
del NODES_MAP[deletedNode.get_name()]
cursor.execute(
"""UPDATE Log_Node
SET delete_scope_id=?
WHERE node_name=?
""",
(scopeID, event["delete"]),
)
cursor.execute(
"""INSERT INTO Log_Node_Operation VALUES (
?,
'DELETE',
?,
?
)""",
(scopeID, event["delete"], event["kind"]),
)
    def process_input_change(event: Dict) -> None:
        nonlocal transID, addedNodes, replacedNodes
        changedNode = NODES_MAP[event["input_change"]]
# Don't touch the line of node input changing into null, it only happened
# in module destructor.
if event["after"] == "NONE":
return
prevNodeName, prevResNo = event["before"].split(":", 1)
newNodeName, newResNo = event["after"].split(":", 1)
prevNode = NODES_MAP[prevNodeName]
newNode = NODES_MAP[newNodeName]
# change the input of changedNode
changedNode.replace_input(
NodeValue(prevNode, prevResNo), NodeValue(newNode, newResNo)
)
prevNode.remove_user(changedNode)
newNode.add_user(changedNode)
        # Record nodes transformation
        if recordTransformation:
            if prevNode.has_no_uses():
                replacedNodes = find_all_replaced_nodes(prevNode)
            # The full scope chain currently on the stack is used as the
            # transformation's scope name.
            store_transformation_into_DB(
                transID,
                changedNode,
                addedNodes,
                replacedNodes,
                cursor,
                "->".join(SCOPE_STACK),
            )
            transID += 1
            addedNodes = []
            replacedNodes = []
def process_scope(scopeName: str, phase: List) -> None:
        global scopeID
        nonlocal recordTransformation
if "::" in scopeName:
scopeName = scopeName.split("::", 1)[-1]
scopeID += 1
if scopeName in dumpPhases:
dump_dag(f"before_{scopeName}_{scopeID}")
if str(scopeID) in dumpPhases:
dump_dag(f"phase_{scopedID}")
SCOPE_STACK.append(scopeName)
# Start recording transformations.
        if scopeName in stopRecordTransformationNames and len(SCOPE_STACK) == 2:
recordTransformation = True
# Update scope entrance in database
cursor.execute(
"""INSERT INTO Log_Scope VALUES (
?,
?,
?
)""",
(scopeID, "ENTER " + scopeName, "ENTER " + scopeName),
)
for ev in phase:
if "create" in ev:
process_create(ev)
elif "delete" in ev:
process_delete(ev)
elif "input_change" in ev:
process_input_change(ev)
else:
name, scope = list(ev.items())[0]
process_scope(name, scope)
# Stop recording transformations.
        if scopeName in stopRecordTransformationNames and len(SCOPE_STACK) == 1:
recordTransformation = False
# Update scope exit in database
cursor.execute(
"""INSERT INTO Log_Scope VALUES (
?,
?,
?
)""",
(scopeID, "EXIT " + scopeName, "EXIT " + name),
)
scopeID += 1
if scopeName in dumpPhases:
dump_dag(f"after_{scopeName}_{scopeID}")
if str(scopeID) in dumpPhases:
dump_dag(f"phase_{scopedID}")
SCOPE_STACK.pop()
print("Log Version:", log["version"])
process_scope("MODULE LOADER", log["passes"])
conn.commit()
def main():
dbFile, logFile, dumpPhases = parse_args()
log = json.load(open(logFile))
with init_db(dbFile) as conn:
process(log, dumpPhases, conn)
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sqlite3
from typing import Dict, List
# A list of all filtered transformations.
TRANS_LIST: List["Transformation"] = []
# Mapping between added nodes and the transformation that adds these nodes.
NODES_ADDING_MAP: Dict[str, "Transformation"] = {}
class Transformation:
"""A class that represents the nodes transformation, e.g. lower,fold etc.
Public attributes:
addedNodes_: List[str]. Nodes added by this transformation.
removedNodes_: List[str]. Nodes removed by this transformation.
ancestors_: List['Transformation']. The ancestor transformation of current transformation.
scopeName_: str. The scope of current transformation.
transID_: str. The internal transformation ID in the database.
isDirectTrans_ :bool. Whether this transformation directly created/replaced the given nodeName that is passed to this script file.
"""
def __init__(self, transID: str):
self.addedNodes_: List[str] = []
self.removedNodes_: List[str] = []
self.ancestors_: List["Transformation"] = []
self.scopeName_: str = ""
self.transID_: str = transID
self.isDirectTrans_: bool = False
def appendAddedNode(self, nodeName: str) -> None:
"""Append the added nodes of this transformation."""
self.addedNodes_.append(nodeName)
def appendRemovedNode(self, nodeName: str) -> None:
"""Append the removed nodes of this transformation."""
self.removedNodes_.append(nodeName)
def addAncestor(self, ancestor: "Transformation") -> None:
"""Add ancestors of this transformation."""
self.ancestors_.append(ancestor)
def setBase(self, baseName: str) -> None:
"""Set the operator base of this node."""
self.baseNode_ = baseName
class DottyPrinter:
"""A class for generating the dotty graph file"""
def __init__(self, transList: List[Transformation]):
self.transList_ = transList
self.vertices_ = []
self.edges_ = []
def get_color(self, isDirectTrans: bool) -> str:
"""Returns the color for the given node."""
if isDirectTrans:
return "Yellow2"
else:
return "AliceBlue"
def dump_label(self, tran: Transformation) -> str:
"""Returns the string for the label of the given transformation."""
labelStr = (
rf"""{{ {{SCOPE:\l{tran.scopeName_} }}|{{ORIGINAL OPERAND CHAIN:\l\l"""
)
for rstr in tran.removedNodes_:
labelStr += rf"""{rstr}\l\l"""
labelStr += rf"}}| {{NEW OPERAND CHAIN:\l\l"
for astr in tran.addedNodes_:
labelStr += rf"""{astr}\l\l"""
labelStr += rf"}} |{{USER NODE: \l\l {tran.baseNode_}}} }}"
return labelStr
def dump_node(self, tran: Transformation) -> None:
"""Generates the dotty information for the given transformation."""
if not tran:
return
tranStr = f"""v{tran.transID_}[\n
\tlabel = \"{self.dump_label(tran)}\"\n
\tshape = \"record\"\n
\tstyle=\"filled,rounded\"\n
\tfillcolor={self.get_color(tran.isDirectTrans_)}\n
penwidth = 2];\n"""
self.vertices_.append(tranStr)
def visit_nodes(self) -> None:
"""Visits all transformation and dump the dotty information for each transformation."""
for tran in self.transList_:
self.dump_node(tran)
def visit_edges(self) -> None:
"""Visits all edges and dump the dotty information for each edge."""
for tran in self.transList_:
for anc in tran.ancestors_:
edgeStr = f"v{anc.transID_} -> v{tran.transID_}"
self.edges_.append(edgeStr)
def dump_graph(self, dottyFile: str) -> None:
"""Visits the graph and generates the dotty information."""
self.visit_nodes()
self.visit_edges()
with open(f"transformations_{dottyFile}.dot", "w") as f:
print(
f"\nWriting DAG info into dotty file transformations_{dottyFile}.dot ..."
)
f.write("digraph DAG {\n\trankdir=TB;\n")
for v in self.vertices_:
f.write(f"{v}\n")
for e in self.edges_:
f.write(f"{e};\n")
f.write("}")
def dump_dotty_DAG(dottyFile: str) -> None:
"""A helper function to dump the dotty file."""
dotty = DottyPrinter(TRANS_LIST)
dotty.dump_graph(dottyFile)
def init_db(sqliteFile: str) -> sqlite3.Connection:
"""Initialize a sqlite3 database connection."""
assert os.path.isfile(sqliteFile)
# Connect to database file.
return sqlite3.connect(sqliteFile)
def find_all_related_transformation(cursor: sqlite3.Cursor, transIDs: List[str]):
"""A recursive function that find all related transformations given a list of transformation IDs in the database.
Args:
cursor: sqlite3.Cursor. Cursor of current sqlite3 database connection.
transIDs: List[str]. A list of transformation IDs.
"""
transQueryStr = "(" + ", ".join(transIDs) + ")"
cursor.execute(
f"""
SELECT node_name
FROM Log_Transformation
WHERE trans_id in {transQueryStr} and operation_type in ('ADD_OPERAND', 'REMOVE_OPERAND')
GROUP BY node_name
"""
)
rows = cursor.fetchall()
nodesList = ["'" + r[0] + "'" for r in rows]
transQueryStr = "(" + ", ".join(nodesList) + ")"
cursor.execute(
f"""
SELECT trans_id
FROM Log_Transformation
WHERE node_name in {transQueryStr} and operation_type in ('ADD_OPERAND', 'REMOVE_OPERAND')
GROUP BY trans_id
"""
)
rows = cursor.fetchall()
newTransIDs = [str(r[0]) for r in rows]
if sorted(newTransIDs) != sorted(transIDs):
transIDs = find_all_related_transformation(cursor, newTransIDs)
return transIDs
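# Illustrative fixed-point expansion: if transformation 3 removed node N and
# transformation 7 added N, querying for N yields {3, 7}; the next pass then pulls
# in every transformation that adds/removes any node touched by 3 or 7, and the
# recursion stops once the set of IDs no longer grows.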
def filter_node_transformation(
nodeName: str, conn: sqlite3.Connection, verbose: bool, dottyFile: str
):
"""Filter out all node transformation that is related to the given node.
Args:
nodeName: str. The node name that is passed to this script.
conn: sqlite3.Connection. A sqlite3 database connection.
verbose: bool. Verbosity of the output.
dottyFile: str. Dotty file name.
"""
cursor = conn.cursor()
cursor.execute(
"""
SELECT trans_id
FROM Log_Transformation
WHERE node_name = ?
GROUP BY trans_id
""",
(nodeName,),
)
rows = cursor.fetchall()
directTransIDs = [str(r[0]) for r in rows]
transIDs = find_all_related_transformation(cursor, directTransIDs)
for tid in transIDs:
cursor.execute(
"""
SELECT *
FROM Log_Transformation
WHERE trans_id = ?
""",
(tid,),
)
rows = cursor.fetchall()
if len(rows):
tran = Transformation(tid)
if tid in directTransIDs:
tran.isDirectTrans_ = True
TRANS_LIST.append(tran)
tran.scopeName_ = rows[0][4].replace("glow::", "").replace("->", r" --\> ")
for r in rows:
opr_type, name, kind = r[1:4]
if opr_type == "ADD_OPERAND":
nodeKindAndName = kind + r" \l" + name
tran.appendAddedNode(nodeKindAndName)
NODES_ADDING_MAP[nodeKindAndName] = tran
elif opr_type == "REMOVE_OPERAND":
nodeKindAndName = kind + r" \l" + name
tran.appendRemovedNode(nodeKindAndName)
if nodeKindAndName in NODES_ADDING_MAP:
tran.addAncestor(NODES_ADDING_MAP[nodeKindAndName])
elif opr_type == "OPERATOR_BASE":
nodeKindAndName = kind + r" \l" + name
tran.setBase(nodeKindAndName)
def processOutDottyName(dottyStyleName):
return dottyStyleName.split(r"\l")[1]
def checkNodeInIt(tran, nodeName):
if nodeName == processOutDottyName(tran.baseNode_):
return True
for rn in tran.removedNodes_:
if nodeName == processOutDottyName(rn):
return True
for an in tran.addedNodes_:
if nodeName == processOutDottyName(an):
return True
return False
for tran in TRANS_LIST:
if not verbose:
if not checkNodeInIt(tran, nodeName):
continue
print(f"\n===============Transformation ID: {tran.transID_} ================")
print("Scope: " + tran.scopeName_.replace(r"\>", ">"))
if nodeName == processOutDottyName(tran.baseNode_):
print("USER NODE: \n(*)" + tran.baseNode_.replace(r"\l", " "))
else:
print("USER NODE: \n" + tran.baseNode_.replace(r"\l", " "))
print("------ Previous operands set:")
for rn in tran.removedNodes_:
if nodeName == processOutDottyName(rn):
print("\t(*)" + rn.replace(r"\l", " "))
else:
print("\t" + rn.replace(r"\l", " "))
print("------ New operands set:")
for an in tran.addedNodes_:
if nodeName == processOutDottyName(an):
print("\t(*)" + an.replace(r"\l", " "))
else:
print("\t" + an.replace(r"\l", " "))
dump_dotty_DAG(dottyFile)
conn.commit()
def stat_list_phases(conn, depth=0):
cursor = conn.cursor()
cursor.execute(
"""
SELECT *
FROM Log_Scope
ORDER BY scope_id
"""
)
rows = cursor.fetchall()
currDepth = 0
print("Phase ID \tPhase Name\n-------------------------\n")
for r in rows:
if "ENTER" in r[1]:
currDepth += 1
if currDepth <= depth or depth == 0:
print(r[0], "\t" * currDepth + r[1])
if "EXIT" in r[1]:
currDepth -= 1
assert currDepth >= 0
def stat_phases_summary(conn: sqlite3.Connection, startPhase: int, endPhase: int):
cursor = conn.cursor()
cursor.execute(
"""
SELECT lng.scope_id, ls.full_scope_str, lng.operation, lng.node_kind, COUNT(node_kind)
FROM Log_Node_Operation lng
LEFT JOIN Log_Scope ls
ON lng.scope_id = ls.scope_id
WHERE lng.scope_id >= ? AND lng.scope_id < ?
GROUP By lng.node_kind
ORDER BY lng.scope_id
""",
(startPhase, endPhase),
)
rows = cursor.fetchall()
print(f"---- Between phase {startPhase} and phase {endPhase}:\n")
summaryStrs = {}
for r in rows:
scope_id, scope, opr, kind, num = r
if scope_id not in summaryStrs:
summaryStrs[scope_id] = f"Phase {scope_id}: \n [{scope}]\n"
summaryStrs[scope_id] += f"\t {opr}D {num} {kind} nodes.\n"
for sid in summaryStrs:
print(summaryStrs[sid])
def stat_phase(conn: sqlite3.Connection, phaseId: int):
cursor = conn.cursor()
cursor.execute(
"""SELECT full_scope_str FROM Log_Scope WHERE scope_id=?""", (phaseId,)
)
rows = cursor.fetchall()
fullScope = rows[0][0]
cursor.execute(
"""
SELECT node_kind, COUNT(node_kind), COUNT(node_kind)*100.0/ (SELECT Count(*) FROM Log_Node WHERE create_scope_id < ? AND delete_scope_id >= ?)
FROM Log_Node
WHERE create_scope_id < ? AND delete_scope_id >= ?
GROUP By node_kind
ORDER BY COUNT(node_kind) DESC
""",
(phaseId, phaseId, phaseId, phaseId),
)
rows = cursor.fetchall()
print(f"=== At phase {phaseId} ({fullScope}): \n")
print(
"\t{:>4s} \t{:>12s} \t\t{:>2s}\n--------------------------------------------------------".format(
"Num", "Kind", "(Percentage)"
)
)
for r in rows:
kind, num, perc = r
print("\t{:>4d} \t{:>12s} \t\t({:>2f}%)".format(num, kind, round(perc, 2)))
def process():
"""Parse args and process this script."""
parser = argparse.ArgumentParser(description="Filter compilation and optimiztion.")
parser.add_argument("--db-file")
parser.add_argument("--filter-target")
parser.add_argument("--filter-target-verbose")
parser.add_argument("--dotty-file")
parser.add_argument("--stat-list-phases", type=bool)
parser.add_argument("--stat-list-phases-depth", type=int)
parser.add_argument("--stat-phases-summary", type=int, nargs="+")
parser.add_argument("--stat-phase", type=int)
options = parser.parse_args()
assert options.db_file, "Please specify db file."
with init_db(options.db_file) as conn:
dottyFile = options.dotty_file if options.dotty_file else "dotty"
if options.filter_target:
filter_node_transformation(options.filter_target, conn, False, dottyFile)
if options.filter_target_verbose:
filter_node_transformation(
options.filter_target_verbose, conn, True, dottyFile
)
if options.stat_list_phases:
stat_list_phases(conn)
if options.stat_list_phases_depth:
stat_list_phases(conn, options.stat_list_phases_depth)
if options.stat_phases_summary:
assert len(options.stat_phases_summary) == 2
startPhase, endPhase = options.stat_phases_summary
stat_phases_summary(conn, startPhase, endPhase)
if options.stat_phase:
stat_phase(conn, options.stat_phase)
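# Example invocations (script, file and node names are illustrative):
#   python <this_script>.py --db-file compilation_log_db.sqlite --stat-list-phases 1
#   python <this_script>.py --db-file compilation_log_db.sqlite --filter-target "conv1__2"
# The first lists the compilation phases; the second prints every transformation
# related to node "conv1__2" and writes transformations_dotty.dot.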
def main():
process()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import yaml
# Command line options.
parser = argparse.ArgumentParser(
usage="Helper script to print the histogram from a Glow YAML profile."
)
parser.add_argument(
"-f", "--file", dest="file", required=True, type=str, help="Profile YAML file path."
)
parser.add_argument(
"-n",
"--name",
dest="name",
required=True,
type=str,
help="Node value name to plot.",
)
parser.add_argument(
"-l",
"--log-scale",
dest="log_scale",
required=False,
default=False,
action="store_true",
help="Plot the histogram on a logarithmic scale (base 10).",
)
args = parser.parse_args()
# Get arguments.
profile = args.file
name = args.name
log_scale = args.log_scale
# Verify profile exists.
if not os.path.isfile(profile):
print('File "%s" not found!' % profile)
exit(1)
# Read YAML data.
print('Reading file "%s" ...' % profile)
data = None
with open(profile, "r") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as err:
print(err)
# Search YAML entry for node value.
print('Searching node value name "%s" ...' % name)
entry = None
for item in data:
if item["nodeOutputName"] == name:
entry = item
if not entry:
print('Node value "%s" not found!' % name)
exit(1)
# Extract data.
hist_min = entry["min"]
hist_max = entry["max"]
histogram = np.array(entry["histogram"])
num_bins = len(histogram)
bin_width = (hist_max - hist_min) / num_bins
bin_centers = [(hist_min + idx * bin_width + bin_width / 2) for idx in range(num_bins)]
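# When plotting on a logarithmic scale, empty bins (count 0) would map to -inf
# under log10, so the counts are clamped to 0 below.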
if log_scale:
histogram = np.log10(histogram)
histogram = np.maximum(histogram, np.zeros(histogram.shape))
# Plot histogram.
fig = plt.figure()
plt.plot(bin_centers, histogram)
plt.bar(bin_centers, histogram, bin_width)
fig.suptitle('Histogram for "%s" with range [%f, %f]' % (name, hist_min, hist_max))
plt.grid()
plt.xlabel("Range")
plt.ylabel("Bins [%s]" % ("Log Scale" if log_scale else "Linear Scale"))
plt.show()
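# Example invocation (script, file and node value names are illustrative):
#   python <this_script>.py -f profile.yaml -n "conv1:0" --log-scale
# plots the recorded histogram for node value "conv1:0" with log10-scaled bin counts.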
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# GRU enums
GRU_DIR_FORWARD = "forward"
GRU_DIR_REVERSE = "reverse"
GRU_DIR_BIDIRECTIONAL = "bidirectional"
GRU_DIRS = [GRU_DIR_FORWARD, GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
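# Example (illustrative): make_init("W", TensorProto.FLOAT, np.zeros((1, 6, 3)))
# returns a TensorProto named "W" with dims [1, 6, 3] and 18 flattened float values.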
# Function to generate GRU ONNX test model
def gen_gru_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
linear_before_reset=False,
):
# Validate parameters
assert direction in GRU_DIRS, "ONNX GRU direction invalid!"
assert not has_sequence_lens, "ONNX GRU Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == GRU_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 3 * hidden_size, input_size]
R_shape = [num_directions, 3 * hidden_size, hidden_size]
B_shape = [num_directions, 6 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
# Generate random inputs (weights are assumed concatenated in ONNX format: z,r,h)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wz = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Wr = np.reshape(
W[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, input_size]
)
Wh = np.reshape(
W[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, input_size]
)
Rz = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
Rr = np.reshape(
R[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, hidden_size]
)
Rh = np.reshape(
R[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, hidden_size]
)
bWz = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bWr = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
bWh = np.reshape(B[dir_idx, 2 * hidden_size : 3 * hidden_size], [hidden_size])
bRz = np.reshape(B[dir_idx, 3 * hidden_size : 4 * hidden_size], [hidden_size])
bRr = np.reshape(B[dir_idx, 4 * hidden_size : 5 * hidden_size], [hidden_size])
bRh = np.reshape(B[dir_idx, 5 * hidden_size : 6 * hidden_size], [hidden_size])
return Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh
# Function to get PyTorch weights (which are in the r,z,h order)
def get_torch_weights(dir_idx):
Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(dir_idx)
W_torch = np.concatenate((Wr, Wz, Wh), 0)
R_torch = np.concatenate((Rr, Rz, Rh), 0)
bW_torch = np.concatenate((bWr, bWz, bWh), 0)
bR_torch = np.concatenate((bRr, bRz, bRh), 0)
return (W_torch, R_torch, bW_torch, bR_torch)
    # ----------------------------------------- COMPUTE PYTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's GRU only supports forward/bidirectional,
    # so the reverse GRU is computed by running a forward PyTorch GRU on the time-reversed input.
gru = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == GRU_DIR_BIDIRECTIONAL),
)
# Get GRU state dictionary
gru_state_dict = gru.state_dict()
# Assign forward weights
forwardEnabled = direction in [GRU_DIR_FORWARD, GRU_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
gru_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
gru_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
gru_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
gru_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == GRU_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
gru_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
gru_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
gru_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
gru_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
gru_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
gru_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
gru_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
gru_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set GRU state dictionary
gru.load_state_dict(gru_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
if direction == GRU_DIR_REVERSE:
Y, next_h = gru(X_torch.flip([0]), initial_h_torch)
Y = Y.flip([0])
else:
Y, next_h = gru(X_torch, initial_h_torch)
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
# Function to compute one GRU cell
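    # At each time step the cell below evaluates the standard GRU equations, with
    # sigmoid f, tanh g, and "." denoting elementwise product:
    #   z_t  = f(x_t*Wz^T + bWz + H_{t-1}*Rz^T + bRz)
    #   r_t  = f(x_t*Wr^T + bWr + H_{t-1}*Rr^T + bRr)
    #   h~_t = g(x_t*Wh^T + bWh + r_t . (H_{t-1}*Rh^T + bRh))   if linear_before_reset
    #   h~_t = g(x_t*Wh^T + bWh + (r_t . H_{t-1})*Rh^T + bRh)   otherwise
    #   H_t  = (1 - z_t) . h~_t + z_t . H_{t-1}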
def compute_gru(forward):
dir_idx = 0 if forward else (0 if direction == GRU_DIR_REVERSE else 1)
Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(dir_idx)
def f(x):
return 1 / (1 + np.exp(-x))
def g(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
zt = f(mm(xt, Wz) + bWz + mm(Ht, Rz) + bRz)
rt = f(mm(xt, Wr) + bWr + mm(Ht, Rr) + bRr)
if linear_before_reset:
htild = g(mm(xt, Wh) + bWh + rt * (mm(Ht, Rh) + bRh))
else:
htild = g(mm(xt, Wh) + bWh + mm(rt * Ht, Rh) + bRh)
Ht = (1 - zt) * htild + zt * Ht
Yslices.append(Ht)
return Yslices, Ht
Yslices = list()
Hslices = list()
# Compute forward GRU
forwardYslices = list()
if forwardEnabled:
Yt, Ht = compute_gru(True)
forwardYslices += Yt
Hslices.append(Ht)
# Compute reverse GRU
reverseYslices = list()
if reverseEnabled:
Yt, Ht = compute_gru(False)
reverseYslices += Yt
Hslices.append(Ht)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
    # Use the NumPy implementation as the reference when linear_before_reset is False;
    # otherwise check that the PyTorch and NumPy references agree.
if linear_before_reset is False:
Y_ref = Y_ref_np
Y_h_ref = Y_h_ref_np
else:
assert (
np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy GRU implementation"
assert (
np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy GRU implementation"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
]
# Node outputs
node_outputs = ["Y", "Y_h"]
# GRU node definition
gru_node_def = onnx.helper.make_node(
"GRU",
name="gru",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
linear_before_reset=linear_before_reset,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# GRU inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# GRU initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "gru_test"
graph_def = helper.make_graph(
[gru_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-gru")
# Check model
onnx.checker.check_model(model_def)
# Print model
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward GRU
gen_gru_onnx_test_model(
model_path="gruForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Reverse GRU
gen_gru_onnx_test_model(
model_path="gruReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Bidirectional GRU
gen_gru_onnx_test_model(
model_path="gruBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Forward no bias GRU
gen_gru_onnx_test_model(
model_path="gruForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Forward no state GRU
gen_gru_onnx_test_model(
model_path="gruForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
linear_before_reset=False,
)
# Forward with linear before reset
gen_gru_onnx_test_model(
model_path="gruForwardLinearBeforeReset.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates Caffe2 models.
# The generated model will be used for Caffe2 importer unittest:
# ./tests/unittests/caffe2ImporterTest.cpp
# Run $>python gen_caffe2_model.py to get the model files.
from caffe2.proto import caffe2_pb2
from caffe2.python import utils
from google.protobuf import text_format
# Define a weights network
weights = caffe2_pb2.NetDef()
weights.name = "init"
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["conv_w"])
op.arg.extend([utils.MakeArgument("shape", [1, 1, 2, 2])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["conv_b"])
op.arg.extend([utils.MakeArgument("shape", [1])])
op.arg.extend([utils.MakeArgument("values", [2.0 for i in range(1)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
# Define an inference net
net = caffe2_pb2.NetDef()
net.name = "predict"
op = caffe2_pb2.OperatorDef()
op.type = "Conv"
op.input.extend(["data"])
op.input.extend(["conv_w"])
op.input.extend(["conv_b"])
op.arg.add().CopyFrom(utils.MakeArgument("kernel", 2))
op.arg.add().CopyFrom(utils.MakeArgument("stride", 1))
op.arg.add().CopyFrom(utils.MakeArgument("group", 1))
op.arg.add().CopyFrom(utils.MakeArgument("pad", 1))
op.output.extend(["conv_out"])
net.op.extend([op])
net.external_output.extend(op.output)
# Generate model in text format.
with open("predict_net.pbtxt", "w") as f:
f.write(text_format.MessageToString(net))
with open("init_net.pbtxt", "w") as f:
f.write(text_format.MessageToString(weights))
# Generate model in binary format.
with open("predict_net.pb", "wb") as f:
f.write(net.SerializeToString())
with open("init_net.pb", "wb") as f:
f.write(weights.SerializeToString())
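# Optional sanity check (a minimal sketch, not part of the original flow): parse the
# text-format predict net back and confirm it round-trips.
check_net = caffe2_pb2.NetDef()
with open("predict_net.pbtxt") as f:
    text_format.Merge(f.read(), check_net)
assert check_net.name == "predict"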
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates the TensorFlowLite models used
# for testing the Glow importer. The models will be generated in a local
# folder here 'tflite_models'. In order for the models to be used for unit
# testing the files must be copied in the folder:
# 'glow\tests\models\tfliteModels'
# To generate the models you need to run this script without arguments:
# python gen_tflite_models.py
# Python requirements: Python 3.6
# Python package requirements:
# TensorFlow 2.1.0
# Keras 2.3.1
# Numpy 1.16.2
# shutil, os, other dependencies
import os
import shutil
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as keras_backend
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Model
from tensorflow.python.tools import freeze_graph
# ----------------------------------------------------------------------------------------------------------------------
# UTILS
# ----------------------------------------------------------------------------------------------------------------------
# Temporary folder path.
TEMP_DIR = os.path.join(os.path.dirname(__file__), "temp")
# Output model folder.
OUT_DIR = os.path.join(os.path.dirname(__file__), "tflite_models")
# Clean temporary directory.
def clean_dir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
os.mkdir(dir_path)
# Remove temporary directory.
def rm_dir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Function to save a model in TensorFlowLite format.
def save_model(model, filename):
# Print status.
print('Saving model "%s" ...' % filename)
# Clean temporary folder.
clean_dir(TEMP_DIR)
# Get model inputs.
model_inputs_dict = dict()
model_inputs_array = []
for idx in range(len(model.inputs)):
model_inputs_dict["input_%d" % idx] = model.inputs[idx]
model_inputs_array.append(model.inputs[idx].op.name)
# Get model outputs.
model_outputs_dict = dict()
model_outputs_array = []
for idx in range(len(model.outputs)):
if idx == 0:
output_name = model.outputs[idx].op.name
else:
output_name = model.outputs[idx].name
model_outputs_dict[output_name] = model.outputs[idx]
model_outputs_array.append(output_name)
# Save TensorFlow checkpoint.
tf.saved_model.simple_save(
keras_backend.get_session(),
os.path.join(TEMP_DIR, "checkpoint"),
inputs=model_inputs_dict,
outputs=model_outputs_dict,
)
# Freeze TensorFlow graph.
freeze_graph.freeze_graph(
None,
None,
None,
None,
model.outputs[0].op.name,
None,
None,
os.path.join(TEMP_DIR, "model.pb"),
False,
"",
input_saved_model_dir=os.path.join(TEMP_DIR, "checkpoint"),
)
# Convert and save TensorFlowLite model.
converter = tf.lite.TFLiteConverter.from_frozen_graph(
os.path.join(TEMP_DIR, "model.pb"),
input_arrays=model_inputs_array,
output_arrays=model_outputs_array,
)
converter.dump_graphviz_video = False
converter.allow_custom_ops = True
tflite_model = converter.convert()
model_filename = os.path.join(OUT_DIR, filename)
if not model_filename.endswith(".tflite"):
model_filename += ".tflite"
open(model_filename, "wb").write(tflite_model)
# Clean temporary folder.
rm_dir(TEMP_DIR)
# Function to save a tensor in binary format. In order for Git to correctly
# recognize these files as binary, we add a leading zero byte ('\x00') to
# the file.
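# For example, a float32 tensor of shape (1, 2) is written as 9 bytes:
# the 1-byte marker followed by 2 * 4 bytes of raw data.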
def save_tensor(tensor, filename):
byte_array = b"\x00" + tensor.tobytes(order="C")
with open(os.path.join(OUT_DIR, filename), "wb") as fh:
fh.write(byte_array)
# Create output directory.
clean_dir(OUT_DIR)
# ----------------------------------------------------------------------------------------------------------------------
# Strided Slice
# ----------------------------------------------------------------------------------------------------------------------
def gen_strided_slice(
name,
input_shape,
begin,
end,
strides,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
):
# Create model.
inp = layers.Input(
name="input", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
out = tf.strided_slice(
inp,
begin,
end,
strides,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask,
)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
# Basic test. Default strides are 1.
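# For instance, with input_shape=(1, 2, 3), begin=(0, 0, 0), end=(1, 1, 1) and unit
# strides, the slice below selects a single element and has output shape (1, 1, 1).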
gen_strided_slice(
name="strided_slice_test0",
input_shape=(1, 2, 3),
begin=(0, 0, 0),
end=(1, 1, 1),
strides=(1, 1, 1),
)
# Test begin_mask. Ignore "begin" value for 2nd dimension and use value for maximum range.
gen_strided_slice(
name="strided_slice_test1",
input_shape=(1, 3, 4),
begin=(0, 2, 3),
end=(1, 3, 4),
strides=(1, 1, 1),
begin_mask=2,
)
# Test end_mask. Ignore "end" value for 2nd dimension and use value for maximum range.
gen_strided_slice(
name="strided_slice_test2",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 1, 1),
strides=(1, 1, 1),
end_mask=2,
)
# Test begin_mask & end_mask. Ignore "begin"/"end" value for 2nd dimension and use values for maximum range.
gen_strided_slice(
name="strided_slice_test3",
input_shape=(1, 3, 4),
begin=(0, 1, 1),
end=(1, 2, 2),
strides=(1, 1, 1),
begin_mask=2,
end_mask=2,
)
# Test ellipsis_mask. Test access pattern [0, ..., 0] where the ellipsis position is marked as 0's for begin/end.
gen_strided_slice(
name="strided_slice_test4",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 0, 1),
strides=(1, 1, 1),
begin_mask=0,
end_mask=0,
ellipsis_mask=2,
)
# Test new_axis_mask.
gen_strided_slice(
name="strided_slice_test5",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 2, 3),
strides=(1, 1, 1),
new_axis_mask=2,
)
# Test shrink_axis_mask.
gen_strided_slice(
name="strided_slice_test6",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 2, 3),
strides=(1, 1, 1),
shrink_axis_mask=2,
)
# ----------------------------------------------------------------------------------------------------------------------
# Select
# ----------------------------------------------------------------------------------------------------------------------
def gen_select_test(name, input_shape):
# Create model.
cond = layers.Input(
name="cond", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.bool
)
lhs = layers.Input(
name="lhs", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
rhs = layers.Input(
name="rhs", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
out = tf.where(cond, x=lhs, y=rhs)
model = Model(inputs=[cond, lhs, rhs], outputs=[out])
# Create data.
np.random.seed(0)
cond_tensor = np.random.randint(low=0, high=2, size=input_shape).astype(np.bool)
lhs_tensor = np.random.rand(*input_shape).astype(np.float32)
rhs_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([cond_tensor, lhs_tensor, rhs_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(cond_tensor, name + ".inp0")
save_tensor(lhs_tensor, name + ".inp1")
save_tensor(rhs_tensor, name + ".inp2")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_select_test(name="select", input_shape=(1, 2, 3))
# ----------------------------------------------------------------------------------------------------------------------
# LogSoftmax
# ----------------------------------------------------------------------------------------------------------------------
def gen_log_softmax_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.nn.log_softmax(inp, axis=axis)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_log_softmax_test(name="log_softmax", input_shape=(1, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# GATHER ND
# ----------------------------------------------------------------------------------------------------------------------
def gen_gather_nd_test(name, data_shape, indices_shape):
# Create model.
data = layers.Input(
name="data", batch_size=data_shape[0], shape=data_shape[1:], dtype=tf.float32
)
indices = layers.Input(
name="indices",
batch_size=indices_shape[0],
shape=indices_shape[1:],
dtype=tf.int32,
)
out = tf.gather_nd(data, indices, batch_dims=0)
model = Model(inputs=[data, indices], outputs=[out])
# Create data.
np.random.seed(0)
data_tensor = np.random.rand(*data_shape).astype(np.float32)
indices_tensor = np.random.randint(
low=0, high=data_shape, size=indices_shape
).astype(np.int32)
out_tensor = model.predict([data_tensor, indices_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(data_tensor, name + ".inp0")
save_tensor(indices_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_gather_nd_test(name="gather_nd", data_shape=(2, 3, 4), indices_shape=(2, 3))
# ----------------------------------------------------------------------------------------------------------------------
# GATHER
# ----------------------------------------------------------------------------------------------------------------------
def gen_gather_test(name, data_shape, indices_shape, axis):
# Create model.
data = layers.Input(
name="data", batch_size=data_shape[0], shape=data_shape[1:], dtype=tf.float32
)
indices = layers.Input(
name="indices",
batch_size=indices_shape[0],
shape=indices_shape[1:],
dtype=tf.int32,
)
out = tf.gather(data, indices, axis=axis, batch_dims=0)
model = Model(inputs=[data, indices], outputs=[out])
# Create data.
np.random.seed(0)
data_tensor = np.random.rand(*data_shape).astype(np.float32)
indices_tensor = np.random.randint(data_shape[axis], size=indices_shape).astype(
np.int32
)
out_tensor = model.predict([data_tensor, indices_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(data_tensor, name + ".inp0")
save_tensor(indices_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_gather_test(
name="gather_axis0", data_shape=(1, 2, 3, 4), indices_shape=(1, 5), axis=0
)
gen_gather_test(
name="gather_axis1", data_shape=(1, 2, 3, 4), indices_shape=(1, 5), axis=1
)
# ----------------------------------------------------------------------------------------------------------------------
# CAST
# ----------------------------------------------------------------------------------------------------------------------
def gen_cast_test(name, input_shape, dtype):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.cast(inp, dtype=dtype)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_cast_test(name="cast_f32_to_int32", input_shape=(1, 1, 2, 12), dtype=tf.int32)
# ----------------------------------------------------------------------------------------------------------------------
# Logical operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_unary_logical_operator_test(name, type):
# Create model.
inp = layers.Input(name="input1", batch_size=1, shape=2, dtype=tf.bool)
if type == "not":
out = tf.math.logical_not(inp)
else:
        print('Logical unary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp], outputs=[out])
# Create data.
inp_tensor = np.array([[False, True]]).astype(bool)
out_tensor = model.predict([inp_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_unary_logical_operator_test(name="logical_not", type="not")
def gen_binary_logical_operator_test(name, type):
# Create model.
inp1 = layers.Input(name="input1", batch_size=1, shape=4, dtype=tf.bool)
inp2 = layers.Input(name="input2", batch_size=1, shape=4, dtype=tf.bool)
if type == "and":
out = tf.math.logical_and(inp1, inp2)
elif type == "or":
out = tf.math.logical_or(inp1, inp2)
else:
        print('Logical binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
inp1_tensor = np.array([[False, True, False, True]]).astype(bool)
inp2_tensor = np.array([[False, False, True, True]]).astype(bool)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_logical_operator_test(name="logical_and", type="and")
gen_binary_logical_operator_test(name="logical_or", type="or")
def gen_cmp_operator_test(name, type):
# Create model.
inp1 = layers.Input(name="input1", batch_size=1, shape=3)
inp2 = layers.Input(name="input2", batch_size=1, shape=3)
if type == "equal":
out = tf.math.equal(inp1, inp2)
elif type == "not_equal":
out = tf.math.not_equal(inp1, inp2)
elif type == "less":
out = tf.math.less(inp1, inp2)
elif type == "less_equal":
out = tf.math.less_equal(inp1, inp2)
elif type == "greater":
out = tf.math.greater(inp1, inp2)
elif type == "greater_equal":
out = tf.math.greater_equal(inp1, inp2)
else:
        print('Logical operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
inp1_tensor = np.array([[1.0, 1.0, -1.0]]).astype(np.float32)
inp2_tensor = np.array([[1.0, -1.0, 1.0]]).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_cmp_operator_test(name="equal", type="equal")
gen_cmp_operator_test(name="not_equal", type="not_equal")
gen_cmp_operator_test(name="less", type="less")
gen_cmp_operator_test(name="less_equal", type="less_equal")
gen_cmp_operator_test(name="greater", type="greater")
gen_cmp_operator_test(name="greater_equal", type="greater_equal")
# ----------------------------------------------------------------------------------------------------------------------
# Unary operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_unary_operator_test(name, type, input_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
if type == "relu":
out = tf.nn.relu(inp)
elif type == "relu_n1to1":
out = tf.clip_by_value(inp, -1.0, 1.0)
elif type == "relu6":
out = tf.nn.relu6(inp)
elif type == "sigmoid":
out = tf.nn.sigmoid(inp)
elif type == "exp":
out = tf.exp(inp)
elif type == "log":
out = tf.math.log(inp)
elif type == "tanh":
out = tf.nn.tanh(inp)
elif type == "leaky_relu":
out = tf.nn.leaky_relu(inp, alpha=0.1)
elif type == "prelu":
out = layers.PReLU(alpha_initializer="random_uniform")(inp)
elif type == "square":
out = tf.math.square(inp)
elif type == "abs":
out = tf.math.abs(inp)
elif type == "neg":
out = tf.math.negative(inp)
elif type == "sqrt":
out = tf.math.sqrt(inp)
elif type == "rsqrt":
out = tf.math.rsqrt(inp)
elif type == "sin":
out = tf.math.sin(inp)
elif type == "cos":
out = tf.math.cos(inp)
elif type == "ceil":
out = tf.math.ceil(inp)
elif type == "round":
out = tf.math.round(inp)
elif type == "floor":
out = tf.math.floor(inp)
else:
        print('Unary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.randn(*input_shape).astype(np.float32)
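    # For log/sqrt/rsqrt, shift the input to be strictly positive so the result is finite.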
if type in ["log", "sqrt", "rsqrt"]:
inp_tensor = np.abs(inp_tensor) + 1
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_unary_operator_test(name="relu", type="relu", input_shape=(1, 10))
gen_unary_operator_test(name="relu_n1to1", type="relu_n1to1", input_shape=(1, 10))
gen_unary_operator_test(name="relu6", type="relu6", input_shape=(1, 10))
gen_unary_operator_test(name="sigmoid", type="sigmoid", input_shape=(1, 10))
gen_unary_operator_test(name="tanh", type="tanh", input_shape=(1, 10))
gen_unary_operator_test(name="exp", type="exp", input_shape=(1, 10))
gen_unary_operator_test(name="log", type="log", input_shape=(1, 10))
gen_unary_operator_test(name="leaky_relu", type="leaky_relu", input_shape=(1, 10))
gen_unary_operator_test(name="prelu", type="prelu", input_shape=(1, 10))
gen_unary_operator_test(name="square", type="square", input_shape=(1, 10))
gen_unary_operator_test(name="abs", type="abs", input_shape=(1, 10))
gen_unary_operator_test(name="neg", type="neg", input_shape=(1, 10))
gen_unary_operator_test(name="sqrt", type="sqrt", input_shape=(1, 10))
gen_unary_operator_test(name="rsqrt", type="rsqrt", input_shape=(1, 10))
gen_unary_operator_test(name="sin", type="sin", input_shape=(1, 10))
gen_unary_operator_test(name="cos", type="cos", input_shape=(1, 10))
gen_unary_operator_test(name="ceil", type="ceil", input_shape=(1, 10))
gen_unary_operator_test(name="round", type="round", input_shape=(1, 10))
gen_unary_operator_test(name="floor", type="floor", input_shape=(1, 10))
# ----------------------------------------------------------------------------------------------------------------------
# Binary operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_binary_operator_test(name, type, input_shape):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
if type == "add":
out = tf.math.add(inp1, inp2)
elif type == "mul":
out = tf.math.multiply(inp1, inp2)
elif type == "sub":
out = tf.math.subtract(inp1, inp2)
elif type == "div":
out = tf.math.divide(inp1, inp2)
elif type == "pow":
out = tf.math.pow(inp1, inp2)
elif type == "max":
out = tf.math.maximum(inp1, inp2)
elif type == "min":
out = tf.math.minimum(inp1, inp2)
else:
        print('Binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
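    # For pow, use a base of at least 1 so the result stays well-behaved.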
if type == "pow":
inp1_tensor = np.abs(inp1_tensor) + 1
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_operator_test(name="add", type="add", input_shape=(1, 10))
gen_binary_operator_test(name="mul", type="mul", input_shape=(1, 10))
gen_binary_operator_test(name="sub", type="sub", input_shape=(1, 10))
gen_binary_operator_test(name="div", type="div", input_shape=(1, 10))
gen_binary_operator_test(name="pow", type="pow", input_shape=(1, 10))
gen_binary_operator_test(name="max", type="max", input_shape=(1, 10))
gen_binary_operator_test(name="min", type="min", input_shape=(1, 10))
# ----------------------------------------------------------------------------------------------------------------------
# Binary broadcasted operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_binary_broadcast_operator_test(name, type, shape_1, shape_2):
# Create model.
inp1 = layers.Input(name="input1", batch_size=shape_1[0], shape=shape_1[1:])
inp2 = layers.Input(name="input2", batch_size=shape_2[0], shape=shape_2[1:])
if type == "add":
out = tf.math.add(inp1, inp2)
elif type == "mul":
out = tf.math.multiply(inp1, inp2)
elif type == "sub":
out = tf.math.subtract(inp1, inp2)
elif type == "div":
out = tf.math.divide(inp1, inp2)
elif type == "max":
out = tf.math.maximum(inp1, inp2)
elif type == "min":
out = tf.math.minimum(inp1, inp2)
else:
print('Binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*shape_1).astype(np.float32)
inp2_tensor = np.random.rand(*shape_2).astype(np.float32)
if type == "pow":
inp1_tensor = np.abs(inp1_tensor) + 1
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_broadcast_operator_test(
name="add_broadcast", type="add", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="mul_broadcast", type="mul", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="sub_broadcast", type="sub", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="div_broadcast", type="div", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="max_broadcast", type="max", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="min_broadcast", type="min", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
# ----------------------------------------------------------------------------------------------------------------------
# Conv2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_conv2d_test(
name, input_shape, filters, kernels, strides, padding, dilations, activation
):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Conv2D(
filters=filters,
kernel_size=kernels,
strides=strides,
padding=padding,
dilation_rate=dilations,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_conv2d_test(
name="conv2d_valid",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_conv2d_test(
name="conv2d_same",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="same",
dilations=(1, 1),
activation="linear",
)
gen_conv2d_test(
name="conv2d_relu",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="relu",
)
# ----------------------------------------------------------------------------------------------------------------------
# DepthwiseConv2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_depthwise_conv2d_test(
name,
input_shape,
depth_multiplier,
kernels,
strides,
padding,
dilations,
activation,
):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.DepthwiseConv2D(
kernel_size=kernels,
strides=strides,
padding=padding,
depth_multiplier=depth_multiplier,
dilation_rate=dilations,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c1_m1",
input_shape=(1, 5, 5, 1),
depth_multiplier=1,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c1_m2",
input_shape=(1, 5, 5, 1),
depth_multiplier=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c2_m1",
input_shape=(1, 5, 5, 2),
depth_multiplier=1,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c2_m2",
input_shape=(1, 5, 5, 2),
depth_multiplier=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
# ----------------------------------------------------------------------------------------------------------------------
# FullyConnected
# ----------------------------------------------------------------------------------------------------------------------
def gen_fully_connected_test(name, input_shape, out_channels, activation):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Dense(
units=out_channels,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_fully_connected_test(
name="fully_connected", input_shape=(2, 5), out_channels=10, activation="linear"
)
# ----------------------------------------------------------------------------------------------------------------------
# MaxPool2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_maxpool2d_test(name, input_shape, kernels, strides, padding):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.MaxPooling2D(pool_size=kernels, strides=strides, padding=padding)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_maxpool2d_test(
name="maxpool2d_valid",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="valid",
)
gen_maxpool2d_test(
name="maxpool2d_same",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="same",
)
# ----------------------------------------------------------------------------------------------------------------------
# AvgPool2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_avgpool2d_test(name, input_shape, kernels, strides, padding):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.AveragePooling2D(pool_size=kernels, strides=strides, padding=padding)(
inp
)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_avgpool2d_test(
name="avgpool2d_valid",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="valid",
)
gen_avgpool2d_test(
name="avgpool2d_same",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="same",
)
# ----------------------------------------------------------------------------------------------------------------------
# Softmax
# ----------------------------------------------------------------------------------------------------------------------
def gen_softmax_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Softmax(axis=axis)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_softmax_test(name="softmax", input_shape=(1, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# Transpose
# ----------------------------------------------------------------------------------------------------------------------
def gen_transpose_test(name, input_shape, perm):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Permute(perm)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_transpose_test(name="transpose", input_shape=(1, 2, 3), perm=(2, 1))
# ----------------------------------------------------------------------------------------------------------------------
# Slice
# ----------------------------------------------------------------------------------------------------------------------
def gen_slice_test(name, input_shape, begin, size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.slice(inp, begin, size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_slice_test(name="slice", input_shape=(1, 2, 3), begin=(0, 1, 2), size=(1, 1, 1))
gen_slice_test(
name="slice_neg_size", input_shape=(1, 2, 3), begin=(0, 1, 2), size=(1, 1, -1)
)
# ----------------------------------------------------------------------------------------------------------------------
# Reshape
# ----------------------------------------------------------------------------------------------------------------------
def gen_reshape_test(name, input_shape, shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.reshape(inp, shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_reshape_test(name="reshape", input_shape=(1, 2, 3), shape=(1, 6))
gen_reshape_test(name="reshape_neg_shape", input_shape=(1, 2, 3), shape=(1, -1))
# ----------------------------------------------------------------------------------------------------------------------
# Concat
# ----------------------------------------------------------------------------------------------------------------------
def gen_concat_test(name, input_shape, axis):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.concat([inp1, inp2], axis)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_concat_test(name="concat", input_shape=(1, 2, 3), axis=1)
gen_concat_test(name="concat_neg_axis", input_shape=(1, 2, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# Split
# ----------------------------------------------------------------------------------------------------------------------
def gen_split_test(name, input_shape, axis, num_split):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
outs = tf.split(inp, num_or_size_splits=num_split, axis=axis)
model = Model(inputs=[inp], outputs=outs)
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensors = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
for idx in range(len(out_tensors)):
save_tensor(out_tensors[idx], name + (".out%d" % idx))
# Clear session.
keras_backend.clear_session()
gen_split_test(name="split", input_shape=(1, 9), axis=-1, num_split=3)
# ----------------------------------------------------------------------------------------------------------------------
# Pad
# ----------------------------------------------------------------------------------------------------------------------
def gen_pad_test(name, input_shape, pads):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.pad(inp, paddings=pads)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_pad_test(name="pad", input_shape=(1, 2, 2), pads=[[0, 0], [1, 2], [0, 3]])
# ----------------------------------------------------------------------------------------------------------------------
# ArgMin/ArgMax
# ----------------------------------------------------------------------------------------------------------------------
def gen_arg_min_max_test(name, type, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
if type == "min":
out = tf.math.argmin(inp, axis=axis)
else:
out = tf.math.argmax(inp, axis=axis)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_arg_min_max_test(name="arg_min", type="min", input_shape=(1, 2, 10), axis=2)
gen_arg_min_max_test(name="arg_max", type="max", input_shape=(1, 2, 10), axis=2)
# ----------------------------------------------------------------------------------------------------------------------
# Pack
# ----------------------------------------------------------------------------------------------------------------------
def gen_pack_test(name, input_shape, axis):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.stack([inp1, inp2], axis=axis)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_pack_test(name="pack", input_shape=(2, 3, 4), axis=1)
# ----------------------------------------------------------------------------------------------------------------------
# Unpack
# ----------------------------------------------------------------------------------------------------------------------
def gen_unpack_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
outs = tf.unstack(inp, axis=axis)
model = Model(inputs=[inp], outputs=outs)
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensors = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
for idx in range(len(out_tensors)):
save_tensor(out_tensors[idx], name + (".out%d" % idx))
# Clear session.
keras_backend.clear_session()
gen_unpack_test(name="unpack", input_shape=(2, 3, 4), axis=1)
# ----------------------------------------------------------------------------------------------------------------------
# Mean
# ----------------------------------------------------------------------------------------------------------------------
def gen_mean_test(name, input_shape, axis, keep_dims):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.reduce_mean(inp, axis=axis, keepdims=keep_dims)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_mean_test(name="mean_keep_dims", input_shape=(1, 2, 10), axis=2, keep_dims=True)
gen_mean_test(name="mean_no_keep_dims", input_shape=(1, 2, 10), axis=2, keep_dims=False)
gen_mean_test(
name="mean_multiple_axis_keep_dims",
input_shape=(1, 2, 10),
axis=(1, 2),
keep_dims=True,
)
gen_mean_test(
name="mean_multiple_axis_no_keep_dims",
input_shape=(1, 2, 10),
axis=(1, 2),
keep_dims=False,
)
# ----------------------------------------------------------------------------------------------------------------------
# Tile
# ----------------------------------------------------------------------------------------------------------------------
def gen_tile_test(name, input_shape, tiles):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.tile(inp, multiples=tiles)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_tile_test(name="tile", input_shape=(1, 2, 3), tiles=[1, 3, 2])
# ----------------------------------------------------------------------------------------------------------------------
# RESIZE NEAREST
# ----------------------------------------------------------------------------------------------------------------------
def gen_resize_nearest_test(name, input_shape, output_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.image.resize_nearest_neighbor(inp, size=output_shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_resize_nearest_test(
name="resize_nearest", input_shape=(1, 3, 4, 2), output_shape=(5, 7)
)
# ----------------------------------------------------------------------------------------------------------------------
# RESIZE BILINEAR
# ----------------------------------------------------------------------------------------------------------------------
def gen_resize_bilinear_test(name, input_shape, output_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.image.resize_bilinear(inp, size=output_shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_resize_bilinear_test(
name="resize_bilinear", input_shape=(1, 3, 4, 2), output_shape=(5, 7)
)
# ----------------------------------------------------------------------------------------------------------------------
# SPACE TO DEPTH
# ----------------------------------------------------------------------------------------------------------------------
def gen_space_to_depth_test(name, input_shape, block_size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.space_to_depth(inp, block_size=block_size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_space_to_depth_test(name="space_to_depth", input_shape=(1, 2, 4, 3), block_size=2)
# ----------------------------------------------------------------------------------------------------------------------
# DEPTH TO SPACE
# ----------------------------------------------------------------------------------------------------------------------
# Note: Older versions of TensorFlow handle this operator as a custom op. This test is
# generated separately by manually editing the 'space_to_depth' test.
def gen_depth_to_space_test(name, input_shape, block_size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.nn.depth_to_space(inp, block_size=block_size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_depth_to_space_test(name="depth_to_space", input_shape=(1, 1, 2, 12), block_size=2)
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from PIL import Image
# This script is used to visualize memory allocations in the Glow compiler.
#
# Usage: ./visualize.py dump.txt
#
# The script will dump a sequence of bitmap files that can be combined into a
# video. Example: heap100123.bmp, heap100124.bmp, heap100125.bmp, ...
#
# On Mac and Linux this command will generate a GIF file:
# convert -delay 10 -loop 0 *.bmp video.gif
#
# The input file should contain a list of allocation/deallocation commands.
# Allocation commands (marked with the letter 'a') report the start address and
# the size of the buffer, and the deallocation commands (marked with the letter
# 'd') report the address of the buffer. You can generate these command lists
# by inserting printf calls into the Glow memory allocator.
#
# Example input:
# a 348864 20000
# a 368896 20000
# a 388928 20000
# a 408960 200000
# d 388928
# d 368896
# d 348864
content = open(sys.argv[1]).read()
lines = content.split("\n")
canvas_size = 512
pixelsize = 8
img = Image.new("RGB", (canvas_size, canvas_size), "black")
pixels = img.load()
# Use this number to assign file names to frames in the video.
filename_counter = 10000000
# Maps from address to size
sizes = {}
color_index = 0
colors = [
(218, 112, 214),
(255, 182, 193),
(250, 235, 215),
(255, 250, 205),
(210, 105, 30),
(210, 180, 140),
(188, 143, 143),
(255, 240, 245),
(230, 230, 250),
(255, 255, 240),
]
def getColor():
global color_index
color_index += 1
return colors[color_index % len(colors)]
def setPixel(addr, color):
# Don't draw out-of-bounds pixels.
if addr >= canvas_size * canvas_size:
return
# Only draw pixels that are aligned to the block size.
if addr % pixelsize != 0:
return
# Draw large pixels.
sx = addr % canvas_size
    sy = addr // canvas_size
    sx = sx // pixelsize
    sy = sy // pixelsize
for x in range(pixelsize):
for y in range(pixelsize):
pixels[sx * pixelsize + x, sy * pixelsize + y] = color
def saveFrame():
global filename_counter
filename_counter += 1
img.save("heap" + str(filename_counter) + ".bmp")
for line in lines:
tokens = line.split()
if len(tokens) < 1:
break
print(tokens)
if tokens[0] == "a":
frm = int(tokens[1])
sz = int(tokens[2])
sizes[frm] = sz
if frm + sz >= canvas_size * canvas_size:
continue
for i in range(sz):
setPixel(i + frm, (255, 255, 255)) # allocate
saveFrame()
cc = getColor()
for i in range(sz):
setPixel(i + frm, cc) # allocated
saveFrame()
if tokens[0] == "d":
frm = int(tokens[1])
sz = sizes[frm]
if frm + sz >= canvas_size * canvas_size:
continue
for i in range(sz):
setPixel(i + frm, (128, 0, 0)) # deallocate
saveFrame()
for i in range(sz):
setPixel(i + frm, (15, 15, 15)) # previously allocated
saveFrame()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates ONNX models.
# The generated model is used by the ONNX importer unit test:
# ./tests/unittests/onnxImporterTest.cpp
# Run $>python gen_onnx_model.py to get the ONNX model.
import numpy as np
import onnx
from onnx import AttributeProto, GraphProto, helper, TensorProto
# The protobuf definition can be found here:
# https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
W = np.array(
[[[[1.0, 1.0], [1.0, 1.0]]]] # (1, 1, 2, 2) tensor for convolution weights
).astype(np.float32)
B = np.array([2.0]).astype(np.float32)
# Convolution with padding. "data" represents the input data,
# which will be provided by ONNX importer unittests.
node_def = onnx.helper.make_node(
"Conv",
inputs=["data", "W", "B"],
outputs=["y"],
kernel_shape=[2, 2],
strides=[1, 1],
pads=[1, 1, 1, 1],
name="conv1",
)
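# With the shapes used below (input [1, 1, 3, 3], kernel 2x2, stride 1, one pixel of
# padding on every side), the output spatial size is (3 + 1 + 1 - 2) / 1 + 1 = 4,
# which matches the [1, 1, 4, 4] output declared in the graph.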
weight_tensor = helper.make_tensor(
name="W", data_type=TensorProto.FLOAT, dims=(1, 1, 2, 2), vals=W.reshape(4).tolist()
)
bias_tensor = helper.make_tensor(
name="B", data_type=TensorProto.FLOAT, dims=(1,), vals=B.reshape(1).tolist()
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def],
"test-model",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, [1, 1, 3, 3]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [1, 1, 2, 2]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [1]),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1, 4, 4])],
)
graph_def.initializer.extend([weight_tensor])
graph_def.initializer.extend([bias_tensor])
graph_def.initializer[0].name = "W"
graph_def.initializer[1].name = "B"
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-conv")
with open("simpleConv.onnxtxt", "w") as f:
f.write(str(model_def))
onnx.checker.check_model(model_def)
print("The model is checked!")
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorflow as tf
from onnx import helper, TensorProto
from tensorflow.python.ops import gen_audio_ops as audio_ops
# ONNX utility.
def make_init(name, dtype, tensor):
return helper.make_tensor(
name=name,
data_type=dtype,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate AudioSpectrogram ONNX test model.
def gen_spectrogram_onnx_test_model(
model_path, window_count, window_size, stride, magnitude_squared=True
):
# Tensor sizes.
input_length = window_size + (window_count - 1) * stride
fft_length = int(2 ** np.ceil(np.log2(window_size)))
input_shape = [1, input_length]
spectrogram_length = int(fft_length / 2 + 1)
spectrogram_shape = [window_count, spectrogram_length]
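    # For example, the two-window model generated below (window_size=640, stride=320,
    # window_count=2) gives input_length = 640 + 1 * 320 = 960, fft_length = 1024
    # (the next power of two >= 640) and spectrogram_shape = [2, 513].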
# Generate random input data.
np.random.seed(1)
input_data = np.random.randn(*input_shape)
# ----------------------------------------- COMPUTE TensorFlow REFERENCE -------------------------------------------
# Define TensorFlow model.
tf_input = tf.constant(
input_data.reshape([input_length, 1]), name="input", dtype=tf.float32
)
tf_spectrogram = audio_ops.audio_spectrogram(
tf_input,
window_size=window_size,
stride=stride,
magnitude_squared=magnitude_squared,
)
# Run TensorFlow model and get reference output.
with tf.Session() as sess:
spectrogram_ref = sess.run(tf_spectrogram)
spectrogram_ref = np.reshape(spectrogram_ref, spectrogram_shape)
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# AudioSpectrogram node definition.
spectrogram_node_def = onnx.helper.make_node(
"AudioSpectrogram",
name="audio_spectrogram",
inputs=["input"],
outputs=["spectrogram"],
window_size=int(window_size),
stride=int(stride),
magnitude_squared=int(magnitude_squared),
)
# Error node definition.
err_node_def = onnx.helper.make_node(
"Sub",
name="error",
inputs=["spectrogram", "spectrogram_ref"],
outputs=["spectrogram_err"],
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# Graph inputs.
graph_input.append(
helper.make_tensor_value_info("input", TensorProto.FLOAT, input_shape)
)
graph_input.append(
helper.make_tensor_value_info(
"spectrogram_ref", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph initializers.
graph_init.append(make_init("input", TensorProto.FLOAT, input_data))
graph_init.append(make_init("spectrogram_ref", TensorProto.FLOAT, spectrogram_ref))
# Graph outputs.
graph_output.append(
helper.make_tensor_value_info(
"spectrogram_err", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph name.
graph_name = "audio_spectrogram_test"
# Define graph (GraphProto).
graph_def = helper.make_graph(
[spectrogram_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers.
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto).
model_def = helper.make_model(graph_def, producer_name="onnx-audio-spectrogram")
# Print model.
with open(model_path, "w") as f:
f.write(str(model_def))
# One window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramOneWindow.onnxtxt",
window_count=1,
window_size=512,
stride=256,
magnitude_squared=True,
)
# Two window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramTwoWindow.onnxtxt",
window_count=2,
window_size=640,
stride=320,
magnitude_squared=True,
)
# Magnitude non-squared.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramNonSquared.onnxtxt",
window_count=1,
window_size=640,
stride=320,
magnitude_squared=False,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from caffe2.proto import caffe2_pb2
from google.protobuf import text_format
def read_model_from_file(path):
m = caffe2_pb2.NetDef()
with open(path, "rb") as f:
if ".pbtxt" in path:
text_format.Merge(f.read(), m)
else:
m.ParseFromString(f.read())
return m
def write_model_to_file(path, m):
with open(path, "wb") as f:
if ".pbtxt" in path:
f.write(text_format.MessageToString(m))
else:
f.write(m.SerializeToString())
# Perform dead code elimination on predict_net removing any nodes that aren't
# used for producing values in predict_net.external_output. Remove any nodes in
# init_net that produce values that are no longer needed by predict_net.
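# For instance (hypothetical net): if predict_net computes A -> B -> "out" and also an
# op C whose outputs are never consumed, dce() drops C, drops any external_input used
# only by C, and then removes the init_net ops that produced those now-unused inputs.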
def dce(init_net, predict_net):
num_predict_net_ops_original = len(predict_net.op)
num_predict_net_inputs_original = len(predict_net.external_input)
# Find the set of tensors used in the computation of the outputs.
live_predict_net_op_outputs = set(predict_net.external_output)
prev_num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
while True:
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
for input_tensor in op.input:
live_predict_net_op_outputs.add(input_tensor)
num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
if num_live_predict_net_op_outputs == prev_num_live_predict_net_op_outputs:
break
prev_num_live_predict_net_op_outputs = num_live_predict_net_op_outputs
# Find the ops that are required to compute the tensors used during
# computation of the outputs.
live_predict_net_ops = []
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
live_predict_net_ops.append(op)
# Delete all unused ops in predict_net.
num_predict_net_ops_eliminated = len(predict_net.op) - len(live_predict_net_ops)
del predict_net.op[:]
predict_net.op.extend(live_predict_net_ops)
# Find the set of all used inputs tensors in predict_net.
live_predict_net_op_inputs = set()
for op in predict_net.op:
for input_tensor in op.input:
live_predict_net_op_inputs.add(input_tensor)
# Find the set of used external_inputs.
live_predict_net_external_inputs = set()
for external_input in predict_net.external_input:
if external_input in live_predict_net_op_inputs:
live_predict_net_external_inputs.add(external_input)
# Delete unused external_inputs in predict_net.
num_predict_net_inputs_eliminated = len(predict_net.external_input) - len(
live_predict_net_external_inputs
)
del predict_net.external_input[:]
predict_net.external_input.extend(live_predict_net_external_inputs)
print(
"predict_net ops eliminated: {}/{}".format(
num_predict_net_ops_eliminated, num_predict_net_ops_original
)
)
print(
"predict_net external_inputs eliminated: {}/{}".format(
num_predict_net_inputs_eliminated, num_predict_net_inputs_original
)
)
# Everything below pertains to removing unused outputs in the init_net,
# if no init net was provided then stop here.
if init_net is None:
return
num_init_net_ops_original = len(init_net.op)
# Find the set of init_net ops with outputs needed by the init_net
live_init_net_ops = []
for op in init_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_external_inputs:
live_init_net_ops.append(op)
# Eliminate dead init_net ops
num_init_net_ops_eliminated = len(init_net.op) - len(live_init_net_ops)
del init_net.op[:]
init_net.op.extend(live_init_net_ops)
# Update init_net external_outputs
live_init_net_op_outputs = set()
for op in init_net.op:
for output_tensor in op.output:
live_init_net_op_outputs.add(output_tensor)
live_init_net_external_outputs = set()
for output_tensor in init_net.external_output:
if output_tensor in live_init_net_op_outputs:
live_init_net_external_outputs.add(output_tensor)
del init_net.external_output[:]
init_net.external_output.extend(live_init_net_external_outputs)
print(
"init_net ops eliminated: {}/{}".format(
num_init_net_ops_eliminated, num_init_net_ops_original
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Caffe2 model dead code elimination")
parser.add_argument("--input_init_net_path", type=str)
parser.add_argument("--input_predict_net_path", type=str, required=True)
parser.add_argument("--output_init_net_path", type=str)
parser.add_argument("--output_predict_net_path", type=str, required=True)
args = parser.parse_args()
predict_net = read_model_from_file(args.input_predict_net_path)
init_net = None
if args.input_init_net_path is not None:
init_net = read_model_from_file(args.input_init_net_path)
dce(init_net, predict_net)
write_model_to_file(args.output_predict_net_path, predict_net)
if args.output_init_net_path is not None:
write_model_to_file(args.output_init_net_path, init_net)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# LSTM enums
LSTM_DIR_FORWARD = "forward"
LSTM_DIR_REVERSE = "reverse"
LSTM_DIR_BIDIRECTIONAL = "bidirectional"
LSTM_DIRS = [LSTM_DIR_FORWARD, LSTM_DIR_REVERSE, LSTM_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate LSTM ONNX test model
def gen_lstm_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
has_initial_c,
has_peephole,
input_forget=False,
):
# Validate parameters
assert direction in LSTM_DIRS, "ONNX LSTM direction invalid!"
assert not has_sequence_lens, "ONNX LSTM Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == LSTM_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 4 * hidden_size, input_size]
R_shape = [num_directions, 4 * hidden_size, hidden_size]
B_shape = [num_directions, 8 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
initial_c_shape = [num_directions, batch_size, hidden_size]
P_shape = [num_directions, 3 * hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
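    # Illustrative sizes (hypothetical): with seq_length=2, batch_size=1, input_size=4,
    # hidden_size=3 and a bidirectional LSTM (num_directions=2), X is [2, 1, 4],
    # W is [2, 12, 4], R is [2, 12, 3], B is [2, 24] and Y is [2, 2, 1, 3].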
# Generate random inputs (weights are assumed concatenated in ONNX format: i,o,f,c)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
initial_c = (
np.random.randn(*initial_c_shape)
if has_initial_c
else np.zeros(initial_c_shape)
)
P = np.random.randn(*P_shape) if has_peephole else np.zeros(P_shape)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wi = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Wo = np.reshape(
W[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, input_size]
)
Wf = np.reshape(
W[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, input_size]
)
Wc = np.reshape(
W[dir_idx, 3 * hidden_size : 4 * hidden_size, :], [hidden_size, input_size]
)
Ri = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
Ro = np.reshape(
R[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, hidden_size]
)
Rf = np.reshape(
R[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, hidden_size]
)
Rc = np.reshape(
R[dir_idx, 3 * hidden_size : 4 * hidden_size, :], [hidden_size, hidden_size]
)
bWi = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bWo = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
bWf = np.reshape(B[dir_idx, 2 * hidden_size : 3 * hidden_size], [hidden_size])
bWc = np.reshape(B[dir_idx, 3 * hidden_size : 4 * hidden_size], [hidden_size])
bRi = np.reshape(B[dir_idx, 4 * hidden_size : 5 * hidden_size], [hidden_size])
bRo = np.reshape(B[dir_idx, 5 * hidden_size : 6 * hidden_size], [hidden_size])
bRf = np.reshape(B[dir_idx, 6 * hidden_size : 7 * hidden_size], [hidden_size])
bRc = np.reshape(B[dir_idx, 7 * hidden_size : 8 * hidden_size], [hidden_size])
Pi = np.tile(P[dir_idx, 0 * hidden_size : 1 * hidden_size], (batch_size, 1))
Po = np.tile(P[dir_idx, 1 * hidden_size : 2 * hidden_size], (batch_size, 1))
Pf = np.tile(P[dir_idx, 2 * hidden_size : 3 * hidden_size], (batch_size, 1))
return (
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
)
# Function to get PyTorch weights (which are in the i, f, c, o order)
def get_torch_weights(dir_idx):
(
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
) = get_weights(dir_idx)
W_torch = np.concatenate((Wi, Wf, Wc, Wo), 0)
R_torch = np.concatenate((Ri, Rf, Rc, Ro), 0)
bW_torch = np.concatenate((bWi, bWf, bWc, bWo), 0)
bR_torch = np.concatenate((bRi, bRf, bRc, bRo), 0)
return (W_torch, R_torch, bW_torch, bR_torch)
# ----------------------------------------- COMPUTE pyTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's LSTM supports only the forward and
    # bidirectional directions, so the reverse LSTM is emulated by running a forward LSTM
    # on the time-reversed input.
lstm = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == LSTM_DIR_BIDIRECTIONAL),
)
# Get LSTM state dictionary
lstm_state_dict = lstm.state_dict()
# Assign forward weights
forwardEnabled = direction in [LSTM_DIR_FORWARD, LSTM_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [LSTM_DIR_REVERSE, LSTM_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == LSTM_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
lstm_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
lstm_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
lstm_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
lstm_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set LSTM state dictionary
lstm.load_state_dict(lstm_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
initial_c_torch = torch.tensor(initial_c, dtype=torch.float32)
if direction == LSTM_DIR_REVERSE:
Y, (next_h, next_c) = lstm(
X_torch.flip([0]), (initial_h_torch, initial_c_torch)
)
Y = Y.flip([0])
else:
Y, (next_h, next_c) = lstm(X_torch, (initial_h_torch, initial_c_torch))
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
Y_c_ref = next_c.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
# Function to compute one LSTM cell
def compute_lstm(forward):
dir_idx = 0 if forward else (0 if direction == LSTM_DIR_REVERSE else 1)
(
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
) = get_weights(dir_idx)
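    # The loop below evaluates the ONNX LSTM cell (with optional peepholes and coupled
    # input/forget gates), where f is the sigmoid and g, h are tanh (defined next):
    #   ft  = f(xt*Wf' + bWf + Ht*Rf' + bRf + Pf.Ct)
    #   it  = 1 - ft                                    (if input_forget)
    #   it  = f(xt*Wi' + bWi + Ht*Ri' + bRi + Pi.Ct)    (otherwise)
    #   ct~ = g(xt*Wc' + bWc + Ht*Rc' + bRc)
    #   Ct  = ft.Ct + it.ct~
    #   ot  = f(xt*Wo' + bWo + Ht*Ro' + bRo + Po.Ct)    (Po sees the updated Ct)
    #   Ht  = ot.h(Ct)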
def f(x):
return 1 / (1 + np.exp(-x))
def g(x):
return np.tanh(x)
def h(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Ct = np.reshape(initial_c[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
ft = f(mm(xt, Wf) + bWf + mm(Ht, Rf) + bRf + Pf * Ct)
if input_forget:
it = 1 - ft
else:
it = f(mm(xt, Wi) + bWi + mm(Ht, Ri) + bRi + Pi * Ct)
ctild = g(mm(xt, Wc) + bWc + mm(Ht, Rc) + bRc)
Ct = ft * Ct + it * ctild
ot = f(mm(xt, Wo) + bWo + mm(Ht, Ro) + bRo + Po * Ct)
Ht = ot * h(Ct)
Yslices.append(Ht)
return Yslices, Ht, Ct
Yslices = list()
Hslices = list()
Cslices = list()
# Compute forward LSTM
forwardYslices = list()
if forwardEnabled:
Yt, Ht, Ct = compute_lstm(True)
forwardYslices += Yt
Hslices.append(Ht)
Cslices.append(Ct)
# Compute reverse LSTM
reverseYslices = list()
if reverseEnabled:
Yt, Ht, Ct = compute_lstm(False)
reverseYslices += Yt
Hslices.append(Ht)
Cslices.append(Ct)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
Y_c_ref_np = np.concatenate(Cslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
    # Use the NumPy reference when peepholes or input_forget are used (PyTorch's LSTM supports
    # neither); otherwise check that the PyTorch and NumPy references agree.
if has_peephole or input_forget:
Y_ref = Y_ref_np
Y_h_ref = Y_h_ref_np
Y_c_ref = Y_c_ref_np
else:
assert (
np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
assert (
np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
assert (
np.max(np.abs(Y_c_ref - Y_c_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
"initial_c" if has_initial_c else "",
"P" if has_peephole else "",
]
# Node outputs
node_outputs = ["Y", "Y_h", "Y_c"]
# LSTM node definition
lstm_node_def = onnx.helper.make_node(
"LSTM",
name="lstm",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
input_forget=input_forget,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# LSTM inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
if has_initial_c:
graph_input.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, initial_c_shape
)
)
if has_peephole:
graph_input.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, P_shape)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# LSTM initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
if has_initial_c:
graph_init.append(make_init("initial_c", TensorProto.FLOAT, initial_c))
if has_peephole:
graph_init.append(make_init("P", TensorProto.FLOAT, P))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "lstm_test"
graph_def = helper.make_graph(
[lstm_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-lstm")
# Check model
onnx.checker.check_model(model_def)
# Print model
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward LSTM
gen_lstm_onnx_test_model(
model_path="lstmForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Reverse LSTM
gen_lstm_onnx_test_model(
model_path="lstmReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Bidirectional LSTM
gen_lstm_onnx_test_model(
model_path="lstmBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Forward no bias LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Forward no state LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
has_initial_c=False,
has_peephole=False,
input_forget=False,
)
# Forward with peephole LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardWithPeephole.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=True,
input_forget=False,
)
# Forward with input forget LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardInputForget.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorflow as tf
from onnx import helper, TensorProto
from tensorflow.python.ops import gen_audio_ops as audio_ops
# ONNX utility.
def make_init(name, dtype, tensor):
return helper.make_tensor(
name=name,
data_type=dtype,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate MFCC ONNX test model.
def gen_mfcc_onnx_test_model(
model_path,
window_count,
window_size,
stride,
sample_rate,
lower_frequency_limit,
upper_frequency_limit,
filterbank_channel_count,
dct_coefficient_count,
):
# Tensor sizes.
input_length = window_size + (window_count - 1) * stride
fft_length = int(2 ** np.ceil(np.log2(window_size)))
input_shape = [1, input_length]
spectrogram_length = int(fft_length / 2 + 1)
spectrogram_shape = [window_count, spectrogram_length]
coefficients_shape = [window_count, dct_coefficient_count]
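    # Example: window_size=640 rounds up to fft_length=1024 and spectrogram_length=513
    # (one bin per non-negative frequency of the real FFT).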
# Generate random input data.
np.random.seed(1)
input_data = np.random.randn(*input_shape)
# ----------------------------------------- COMPUTE TensorFlow REFERENCE -------------------------------------------
# Define TensorFlow model.
tf_input = tf.constant(
input_data.reshape([input_length, 1]), name="input", dtype=tf.float32
)
tf_spectrogram = audio_ops.audio_spectrogram(
tf_input, window_size=window_size, stride=stride, magnitude_squared=True
)
tf_mfcc = audio_ops.mfcc(
spectrogram=tf_spectrogram,
sample_rate=sample_rate,
upper_frequency_limit=upper_frequency_limit,
lower_frequency_limit=lower_frequency_limit,
filterbank_channel_count=filterbank_channel_count,
dct_coefficient_count=dct_coefficient_count,
)
# Run TensorFlow model and get spectrogram input.
with tf.Session() as sess:
spectrogram = sess.run(tf_spectrogram)
spectrogram = np.reshape(spectrogram, spectrogram_shape)
# Run TensorFlow model and get reference output coefficients.
with tf.Session() as sess:
coefficients_ref = sess.run(tf_mfcc)
coefficients_ref = np.reshape(coefficients_ref, coefficients_shape)
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# MFCC node definition.
mfcc_node_def = onnx.helper.make_node(
"MFCC",
name="mfcc",
inputs=["spectrogram"],
outputs=["coefficients"],
sample_rate=float(sample_rate),
lower_frequency_limit=float(lower_frequency_limit),
upper_frequency_limit=float(upper_frequency_limit),
filterbank_channel_count=int(filterbank_channel_count),
dct_coefficient_count=int(dct_coefficient_count),
)
# Error node definition.
err_node_def = onnx.helper.make_node(
"Sub",
name="error",
inputs=["coefficients", "coefficients_ref"],
outputs=["coefficients_err"],
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# Graph inputs.
graph_input.append(
helper.make_tensor_value_info(
"spectrogram", TensorProto.FLOAT, spectrogram_shape
)
)
graph_input.append(
helper.make_tensor_value_info(
"coefficients_ref", TensorProto.FLOAT, coefficients_shape
)
)
# Graph initializers.
graph_init.append(make_init("spectrogram", TensorProto.FLOAT, spectrogram))
graph_init.append(
make_init("coefficients_ref", TensorProto.FLOAT, coefficients_ref)
)
# Graph outputs.
graph_output.append(
helper.make_tensor_value_info(
"coefficients_err", TensorProto.FLOAT, coefficients_shape
)
)
# Graph name.
graph_name = "mfcc_test"
# Define graph (GraphProto).
graph_def = helper.make_graph(
[mfcc_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers.
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto).
model_def = helper.make_model(graph_def, producer_name="onnx-mfcc")
# Print model.
with open(model_path, "w") as f:
f.write(str(model_def))
# One window MFCC.
gen_mfcc_onnx_test_model(
model_path="mfccOneWindow.onnxtxt",
window_count=1,
window_size=640,
stride=320,
sample_rate=16000,
lower_frequency_limit=20,
upper_frequency_limit=4000,
filterbank_channel_count=40,
dct_coefficient_count=10,
)
# Two window MFCC.
gen_mfcc_onnx_test_model(
model_path="mfccTwoWindow.onnxtxt",
window_count=2,
window_size=512,
stride=256,
sample_rate=16000,
lower_frequency_limit=20,
upper_frequency_limit=4000,
filterbank_channel_count=40,
dct_coefficient_count=10,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# RNN enums
RNN_DIR_FORWARD = "forward"
RNN_DIR_REVERSE = "reverse"
RNN_DIR_BIDIRECTIONAL = "bidirectional"
RNN_DIRS = [RNN_DIR_FORWARD, RNN_DIR_REVERSE, RNN_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate RNN ONNX test model
def gen_rnn_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
):
# Validate parameters
assert direction in RNN_DIRS, "ONNX RNN direction invalid!"
assert not has_sequence_lens, "ONNX RNN Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == RNN_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 1 * hidden_size, input_size]
R_shape = [num_directions, 1 * hidden_size, hidden_size]
B_shape = [num_directions, 2 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
    # Generate random inputs (the single-gate RNN weights W, R, and B are packed per direction in ONNX format)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wi = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Ri = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
bWi = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bRi = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
return (Wi, Ri, bWi, bRi)
    # Function to get PyTorch weights (the RNN has a single gate, so the layout already matches ONNX)
def get_torch_weights(dir_idx):
Wi, Ri, bWi, bRi = get_weights(dir_idx)
W_torch = Wi
R_torch = Ri
bW_torch = bWi
bR_torch = bRi
return (W_torch, R_torch, bW_torch, bR_torch)
    # ----------------------------------------- COMPUTE PYTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's RNN supports only the forward/bidirectional
    # directions, so the reverse RNN is emulated by running a forward PyTorch RNN on time-flipped input.
rnn = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
nonlinearity="tanh",
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == RNN_DIR_BIDIRECTIONAL),
)
# Get RNN state dictionary
rnn_state_dict = rnn.state_dict()
# Assign forward weights
forwardEnabled = direction in [RNN_DIR_FORWARD, RNN_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
rnn_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
rnn_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
rnn_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
rnn_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [RNN_DIR_REVERSE, RNN_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == RNN_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
rnn_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
rnn_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
rnn_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
rnn_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
rnn_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
rnn_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
rnn_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
rnn_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set RNN state dictionary
rnn.load_state_dict(rnn_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
if direction == RNN_DIR_REVERSE:
Y, next_h = rnn(X_torch.flip([0]), initial_h_torch)
Y = Y.flip([0])
else:
Y, next_h = rnn(X_torch, initial_h_torch)
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
# Function to compute one RNN cell
def compute_rnn(forward):
dir_idx = 0 if forward else (0 if direction == RNN_DIR_REVERSE else 1)
Wi, Ri, bWi, bRi = get_weights(dir_idx)
def f(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
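    # Single-gate ONNX RNN cell, applied below at every time step:
    #   Ht = tanh(xt*Wi' + bWi + Ht*Ri' + bRi)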
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
Ht = f(mm(xt, Wi) + bWi + mm(Ht, Ri) + bRi)
Yslices.append(Ht)
return Yslices, Ht
Yslices = list()
Hslices = list()
# Compute forward RNN
forwardYslices = list()
if forwardEnabled:
Yt, Ht = compute_rnn(True)
forwardYslices += Yt
Hslices.append(Ht)
# Compute reverse RNN
reverseYslices = list()
if reverseEnabled:
Yt, Ht = compute_rnn(False)
reverseYslices += Yt
Hslices.append(Ht)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
# Compare Numpy with Torch implementation.
assert (
np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy RNN implementation"
assert (
np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy RNN implementation"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
]
# Node outputs
node_outputs = ["Y", "Y_h"]
# RNN node definition
rnn_node_def = onnx.helper.make_node(
"RNN",
name="rnn",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# RNN inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# RNN initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "rnn_test"
graph_def = helper.make_graph(
[rnn_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-rnn")
# Check model
onnx.checker.check_model(model_def)
# Print model
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward RNN
gen_rnn_onnx_test_model(
model_path="rnnForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Reverse RNN
gen_rnn_onnx_test_model(
model_path="rnnReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Bidirectional RNN
gen_rnn_onnx_test_model(
model_path="rnnBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Forward no bias RNN
gen_rnn_onnx_test_model(
model_path="rnnForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
)
# Forward no state RNN
gen_rnn_onnx_test_model(
model_path="rnnForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import multiprocessing
import os
import shlex
import subprocess
import sys
from collections import namedtuple
from contextlib import contextmanager
from distutils import log, sysconfig
from distutils.spawn import find_executable
from textwrap import dedent
import setuptools
import setuptools.command.build_ext
import setuptools.command.build_py
import setuptools.command.develop
try:
import torch
except ImportError as e:
print("Unable to import torch. Error:")
print("\t", e)
print("You need to install pytorch first.")
sys.exit(1)
print("torch version:", torch.__version__)
print("torch location:", os.path.dirname(os.path.realpath(torch.__file__)))
# Current setup.py file directory, i.e. glow/torch_glow.
FILE_DIR = os.path.realpath(os.path.dirname(__file__))
# Find the top directory with root Makefile, i.e. glow
TOP_DIR = os.path.realpath(os.path.dirname(FILE_DIR))
os.environ["TOP_DIR"] = TOP_DIR
# Make the build directory a subdirectory of TOP_DIR, i.e.
# glow/build.
CMAKE_BUILD_DIR = os.path.join(TOP_DIR, "build")
CMAKE = find_executable("cmake") or find_executable("cmake3")
if not CMAKE:
print('Could not find "cmake". Make sure it is in your PATH.')
sys.exit(1)
install_requires = []
setup_requires = []
tests_require = []
extras_require = {}
# ################################################################################
# # Flags
# ################################################################################
# store first argument
assert len(sys.argv) > 0
first_arg = sys.argv[0]
# parse known arguments
parser = argparse.ArgumentParser()
parser.add_argument("--run_cmake", action="store_true", default=False, help="Run cmake")
parser.add_argument(
"--release", action="store_true", default=False, help="Compile with release on"
)
parser.add_argument(
"--cmake_prefix_path", type=str, help="Populates -DCMAKE_PREFIX_PATH"
)
# restore first and remaining arguments to argv
arg_parse_res = parser.parse_known_args()
args = arg_parse_res[0]
sys.argv = [first_arg] + arg_parse_res[1]
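# Illustrative invocation: `python setup.py develop --run_cmake --release`; the custom flags
# are consumed here and the remaining arguments are handed back to setuptools via sys.argv.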
# ################################################################################
# # Utilities
# ################################################################################
@contextmanager
def cd(path):
if not os.path.isabs(path):
raise RuntimeError("Can only cd to absolute path, got: {}".format(path))
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
# ################################################################################
# # Customized commands
# ################################################################################
class cmake_build(setuptools.Command):
"""
Compiles everything when `python setup.py develop` is run using cmake.
Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
environment variable.
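    Illustrative example (any valid cmake flag works):
        CMAKE_ARGS="-DCMAKE_C_COMPILER=clang" python setup.py develop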
"""
def initialize_options(self):
pass
def finalize_options(self):
pass
def _run_cmake(self):
with cd(CMAKE_BUILD_DIR):
cmake_args = [
CMAKE,
"-DC10_USE_GLOG=1",
"-DCMAKE_BUILD_RTTI=ON",
"-DGLOW_BUILD_PYTORCH_INTEGRATION=ON",
"-DGLOW_BUILD_EXAMPLES=OFF",
"-DGLOW_BUILD_TESTS=OFF",
"-DBUILD_SHARED_LIBS=OFF",
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
"-DCMAKE_BUILD_TYPE={}".format("Release" if args.release else "Debug"),
"-DPYTHON_EXECUTABLE={}".format(sys.executable),
# PyTorch cmake args
"-DPYTORCH_DIR={}".format(
os.path.dirname(os.path.realpath(torch.__file__))
),
"-DTORCH_GLOW={}".format(FILE_DIR),
]
if args.cmake_prefix_path:
cmake_args.append(
"-DCMAKE_PREFIX_PATH={}".format(args.cmake_prefix_path)
)
if "CMAKE_ARGS" in os.environ:
extra_cmake_args = shlex.split(os.environ["CMAKE_ARGS"])
log.info("Extra cmake args: {}".format(extra_cmake_args))
cmake_args.extend(extra_cmake_args)
cmake_args.append(TOP_DIR)
subprocess.check_call(cmake_args)
def _run_build(self):
with cd(CMAKE_BUILD_DIR):
build_args = [
CMAKE,
"--build",
os.curdir,
"--",
"-j",
str(multiprocessing.cpu_count()),
]
subprocess.check_call(build_args)
def run(self):
is_initial_build = not os.path.exists(CMAKE_BUILD_DIR)
if is_initial_build:
os.makedirs(CMAKE_BUILD_DIR)
if is_initial_build or args.run_cmake:
self._run_cmake()
self._run_build()
class develop(setuptools.command.develop.develop):
def run(self):
self.run_command("build_ext")
setuptools.command.develop.develop.run(self)
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
self.run_command("cmake_build")
setuptools.command.build_ext.build_ext.run(self)
def build_extensions(self):
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.basename(self.get_ext_filename(fullname))
src = os.path.join(CMAKE_BUILD_DIR, "torch_glow", "src", filename)
dst = os.path.join(os.path.realpath(self.build_lib), "torch_glow", filename)
print("dst", dst)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
self.copy_file(src, dst)
cmdclass = {"cmake_build": cmake_build, "develop": develop, "build_ext": build_ext}
# ################################################################################
# # Extensions
# ################################################################################
ext_modules = [setuptools.Extension(name=str("torch_glow._torch_glow"), sources=[])]
# ################################################################################
# # Packages
# ################################################################################
# # no need to do fancy stuff so far
packages = setuptools.find_packages()
# ################################################################################
# # Test
# ################################################################################
setup_requires.append("pytest-runner")
tests_require.append("pytest")
# ################################################################################
# # Final
# ################################################################################
setuptools.setup(
name="torch_glow",
description="PyTorch + Glow",
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
include_package_data=True,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
extras_require=extras_require,
author="jackm321",
author_email="[email protected]",
url="https://github.com/pytorch/glow",
)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
def pytest_addoption(parser):
parser.addoption("--backend", action="store", default=None)
def pytest_sessionstart(session):
backend = session.config.getoption("--backend")
if backend:
torch_glow.setGlowBackend(backend)
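# Illustrative usage: `pytest --backend=Interpreter` (or another Glow backend name)
# selects the Glow backend for the whole test session.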
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import os
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import BytesIO
import numpy as np
import torch
import torch_glow
from parameterized import parameterized
GLOW_FUSION_GROUP = "glow::FusionGroup"
SUBGRAPH_ATTR = "Subgraph"
BACKEND_NAME_KEY = "BACKEND_NAME"
INTERPRETER = "Interpreter"
DEFAULT_BACKEND = os.environ.get(BACKEND_NAME_KEY, "Interpreter")
def get_backend_name():
return os.environ.get(BACKEND_NAME_KEY, INTERPRETER)
@contextmanager
def ephemeral_torchglow_settings(
fp16=False,
backend=DEFAULT_BACKEND,
fusion=False,
blocklist=None,
accept_all_layouts=False,
):
old_fp16 = torch_glow.get_convert_to_fp16()
old_clip = torch_glow.get_clip_fp16()
old_convert_fused = torch_glow.get_convert_fused_to_fp16()
old_backend = torch_glow.getGlowBackendName()
old_blocklist = torch_glow.getFusionBlocklist()
old_fusion = torch_glow.getFusionPassEnabled()
try:
if fusion:
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
else:
torch_glow.disableFusionPass()
if fp16:
torch_glow.enable_convert_to_fp16()
torch_glow.enable_convert_fused_to_fp16()
torch_glow.enable_clip_fp16()
else:
torch_glow.disable_convert_to_fp16()
torch_glow.disable_convert_fused_to_fp16()
torch_glow.disable_clip_fp16()
if blocklist is None:
torch_glow.clearFusionBlocklist()
else:
torch_glow.setFusionBlocklist(list(blocklist))
if accept_all_layouts:
torch_glow.enable_accept_all_layout()
else:
torch_glow.disable_accept_all_layout()
torch_glow.setGlowBackend(backend)
yield
finally:
torch_glow.enable_convert_to_fp16() if old_fp16 else torch_glow.disable_convert_to_fp16()
torch_glow.enable_clip_fp16() if old_clip else torch_glow.disable_clip_fp16()
torch_glow.enable_convert_fused_to_fp16() if old_convert_fused else torch_glow.disable_convert_fused_to_fp16()
torch_glow.enableFusionPass_DO_NOT_USE_THIS() if old_fusion else torch_glow.disableFusionPass()
torch_glow.setGlowBackend(old_backend)
torch_glow.setFusionBlocklist(old_blocklist)
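# Illustrative usage: run one test body with fusion and fp16 enabled, restoring the previous
# global torch_glow settings on exit (module/inputs are placeholders):
#   with ephemeral_torchglow_settings(fusion=True, fp16=True):
#       jit_mod = torch.jit.trace(module, inputs)
#       jit_mod(*inputs)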
def check_skip(case):
backend = DEFAULT_BACKEND
supported = {INTERPRETER}
try:
supported = supported | case.supported_backends
except AttributeError:
pass
if backend not in supported:
case.skipTest("Skipping tests for backend: " + backend)
def assert_equivalent(
result1_name, result1, result2_name, result2, atol=5e-4, rtol=1e-3, use_eq=False
):
if isinstance(result1, tuple) or isinstance(result2, tuple):
assert isinstance(result1, tuple) and isinstance(result2, tuple)
assert len(result1) == len(result2)
return all(
assert_equivalent(
result1_name, a, result2_name, b, atol=atol, rtol=rtol, use_eq=use_eq
)
for a, b in zip(result1, result2)
)
elif result2.dtype == torch.bool:
diff = torch.eq(result1, result2)
if torch.all(diff):
return True
else:
error = f"Diff:{diff}\n"
raise AssertionError(error)
else:
matches = (
torch.equal(result1, result2)
if use_eq
else torch.allclose(result1, result2, rtol=rtol, atol=atol)
)
if matches:
return True
else:
diff = torch.abs(result1 - result2)
error = f"{result1_name} result:\n{result1}\n"
error += f"{result2_name} result:\n{result2}\n"
error += f"Diff:\n{diff}\n"
error += f"Max diff:\n{torch.max(diff)}"
raise AssertionError(error)
# Shared empty set used as the default for skip_for_backends (avoids a mutable default argument)
DEFAULT_SKIP_BACKENDS_SET = set()
def run_comparison_tests(
module,
inputs,
fusible_ops,
fp32vfp32_atol=5e-4,
fp32vfp32_rtol=1e-3,
fp32vfp16_atol=1e-2,
fp32vfp16_rtol=1e-2,
fp16vfp16_atol=5e-4,
fp16vfp16_rtol=1e-3,
fusion_blocklist=None,
scripted=False,
check_trace=True,
skip_for_backends=DEFAULT_SKIP_BACKENDS_SET,
skip_fp32_vs_fp16=False,
skip_to_glow=False, # Ugly hack, TODO: Remove
):
# tuplify inputs
if not isinstance(inputs, tuple):
inputs = (inputs,)
# Check that test is setup properly
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
if "Interpreter" in skip_for_backends:
raise AssertionError(
"Interpreter backend can't be skipped, skip entire test until Interpreter is supported"
)
# If other_backend isn't supported then skip the test
other_backend = torch_glow.getGlowBackendName()
if other_backend in skip_for_backends:
raise unittest.SkipTest(f"backend {other_backend} not supported for this test")
# Get other Glow backend besides Interpreter to test if applicable
if other_backend == "Interpreter":
other_backend = None
if skip_to_glow and other_backend:
raise AssertionError(
f"to_glow must be used for non-interpreter backends, skip this test for {other_backend} backend until the test supports to_glow"
)
def prepare(m, inputs, fp16, backend, fusion):
""" "Helper to prepare a JIT module to run either on PyTorch or Glow"""
inputs = deepcopy(inputs)
def getJITModule():
m_jit = None
if scripted:
m_jit = torch.jit.script(m)
else:
m_jit = torch.jit.trace(m, inputs, check_trace=check_trace)
if scripted or not check_trace:
# run it once to activate the fuser if not run yet
m_jit(*inputs)
return m_jit
with torch.no_grad():
m_jit = None
if fusion:
with ephemeral_torchglow_settings(
fusion=True, fp16=fp16, backend=backend, blocklist=fusion_blocklist
):
m_jit = getJITModule()
assert_fused(
m_jit.graph_for(*(deepcopy(inputs))),
fusible_ops,
)
else:
m_jit = getJITModule()
if backend != "PyTorch": # to_glow
m_jit = torch_glow.lower(
model=m_jit,
example_inputs=inputs,
backend=backend,
convert_to_fp16=fp16,
)
return m_jit
def compare(a_name, a, b_name, b, atol, rtol, use_eq=False):
""" "Helper to compare two JIT modules, skip comparison if either is None"""
if not a:
print(f"Skipping {a_name} vs {b_name} because {a_name} not computed")
return
if not b:
print(f"Skipping {a_name} vs {b_name} because {b_name} not computed")
return
        a_outputs = a(*deepcopy(inputs))
        b_outputs = b(*deepcopy(inputs))
        assert_equivalent(a_name, a_outputs, b_name, b_outputs, atol, rtol, use_eq)
# Prepare modules for testing
m_pytorch_fp32 = prepare(
module, inputs, fp16=False, backend="PyTorch", fusion=False
)
m_interpreter_fuser_fp32 = prepare(
module, inputs, fp16=False, backend="Interpreter", fusion=True
)
m_interpreter_fp32 = None
m_interpreter_fp16 = None
m_other_fp16 = None
if not skip_to_glow:
m_interpreter_fp32 = prepare(
module, inputs, fp16=False, backend="Interpreter", fusion=True
)
m_interpreter_fp16 = prepare(
module, inputs, fp16=True, backend="Interpreter", fusion=True
)
m_other_fp16 = None
if other_backend:
m_other_fp16 = prepare(
module, inputs, fp16=True, backend=other_backend, fusion=False
)
# JIT vs Interpreter, via to_glow, fp32-fp32
compare(
"m_pytorch_fp32",
m_pytorch_fp32,
"m_interpreter_fp32",
m_interpreter_fp32,
fp32vfp32_atol,
fp32vfp32_rtol,
)
# Interpreter vs Interpreter, via to_glow and fuser, fp32-fp32
compare(
"m_interpreter_fp32",
m_interpreter_fp32,
"m_interpreter_fuser_fp32",
m_interpreter_fuser_fp32,
fp32vfp32_atol,
fp32vfp32_rtol,
use_eq=True, # fuser and to_glow should match exactly
)
# Interpreter vs Other, via to_glow, fp16-fp16
compare(
"m_interpreter_fp16",
m_interpreter_fp16,
"m_other_fp16",
m_other_fp16,
fp16vfp16_atol,
fp16vfp16_rtol,
)
if not skip_fp32_vs_fp16:
# JIT vs Interpreter, via to_glow, fp32-fp16
compare(
"m_pytorch_fp32",
m_pytorch_fp32,
"m_interpreter_fp16",
m_interpreter_fp16,
fp32vfp16_atol,
fp32vfp16_rtol,
)
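# Illustrative usage (module, inputs, and ops are test-specific placeholders):
#   run_comparison_tests(MyModule(), (torch.randn(2, 3),), fusible_ops={"aten::add"})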
def compare_tracing_methods(
module,
*inputs,
atol=5e-4,
rtol=1e-3,
reference=None,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
scripted=False,
check_trace=True,
accept_all_layouts=False,
skip_to_glow=False, # Ugly hack, TODO: Remove
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
if scripted:
return torch.jit.script(mod)
else:
return torch.jit.trace(mod, ins, check_trace=check_trace)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=True,
fp16=fp16,
blocklist=fusion_blocklist,
accept_all_layouts=accept_all_layouts,
):
fusion_inputs = deepcopy(inputs)
fusion_trace = trace(module, fusion_inputs)
assert_fused(
fusion_trace.graph_for(*fusion_inputs),
(fusible_ops or []),
accept_any=fusible_ops is None,
)
fusion_result = fusion_trace(*fusion_inputs)
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
if scripted:
torchscript_result = module(*deepcopy(inputs))
else:
torchscript_inputs = deepcopy(inputs)
torchscript_trace = trace(module, torchscript_inputs)
torchscript_result = torchscript_trace(*torchscript_inputs)
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
if not skip_to_glow:
glow_inputs = deepcopy(inputs)
traced_module = trace(module, glow_inputs)
lowered_module = torch_glow.lower(
traced_module, glow_inputs, DEFAULT_BACKEND
)
glow_result = lowered_module(*glow_inputs)
if reference:
assert_equivalent(
"Reference",
reference,
"Glow fusion",
                fusion_result,
atol=atol,
rtol=rtol,
)
assert_equivalent(
"Reference",
reference,
"TorchScript",
torchscript_result,
atol=atol,
rtol=rtol,
)
if not skip_to_glow:
assert_equivalent(
"Reference", reference, "Glow", glow_result, atol=atol, rtol=rtol
)
# This is written out manually instead of using combinations in order to aid
# debugging. TODO: Clean up.
assert_equivalent(
"Glow fusion",
fusion_result,
"TorchScript",
torchscript_result,
atol=atol,
rtol=rtol,
)
if not skip_to_glow:
assert_equivalent(
"Glow fusion", fusion_result, "Glow", glow_result, atol=atol, rtol=rtol
)
assert_equivalent(
"TorchScript",
torchscript_result,
"Glow",
glow_result,
atol=atol,
rtol=rtol,
)
# Compilation test for glow lowering without executing.
# This is designed for use cases where the original graph contains placeholder operators.
def test_lowering(
module,
*inputs,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
scripted=False,
check_trace=True,
accept_all_layouts=False,
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
if scripted:
return torch.jit.script(mod)
else:
return torch.jit.trace(mod, ins, check_trace=check_trace)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
glow_inputs = deepcopy(inputs)
traced_module = trace(module, glow_inputs)
# If deferred weight loader is not set, it will throw a runtime exception
_lowered_module = torch_glow.lower(
traced_module, glow_inputs, DEFAULT_BACKEND
) # unused
def compare_tracing_methods_error(
module,
*inputs,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
return torch.jit.trace(mod, ins)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=True, fp16=fp16, blocklist=fusion_blocklist
):
fusion_inputs = deepcopy(inputs)
try:
fusion_trace = trace(module, fusion_inputs)
assert_fused(
fusion_trace.graph_for(*fusion_inputs),
*(fusible_ops or []),
accept_any=fusible_ops is None,
)
fusion_trace(*fusion_inputs)
except Exception:
pass
else:
raise AssertionError("Error expected (fusion), but none were received")
with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
try:
torchscript_inputs = deepcopy(inputs)
torchscript_trace = trace(module, torchscript_inputs)
torchscript_trace(*torchscript_inputs)
except Exception:
pass
else:
raise AssertionError(
"Error expected (torchscript), but none were received"
)
with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
try:
glow_inputs = deepcopy(inputs)
glow_spec = torch_glow.lower(
model=module,
example_inputs=glow_inputs,
backend=DEFAULT_BACKEND,
)
glow_trace = torch_glow.to_glow(trace(module, glow_inputs), glow_spec)
glow_trace(*glow_inputs)
except Exception:
pass
else:
raise AssertionError("Error expected (glow), but none were received")
def assert_fused(fused_graph, ops, accept_any=False, strict=False):
expected = set(ops)
fused = set()
with torch.no_grad():
for node in fused_graph.nodes():
kind = node.kind()
if kind == GLOW_FUSION_GROUP:
fused.update(map(lambda n: n.kind(), node.g(SUBGRAPH_ATTR).nodes()))
else:
assert (
kind not in expected
), f"Expected {kind} to be fused in graph\n{fused_graph}"
missing = set() if (accept_any and fused) else expected - fused
unexpected = set() if (accept_any or not strict) else fused - expected
assert (
not unexpected
), f"Expected fusion of {expected}, but {fused} was fused in graph\n{fused_graph}"
assert (
not missing
), f"Expected fusion of {expected}, but only {fused} was fused in graph\n{fused_graph}"
def graph_contains_str(graph, substr):
return graph.str().find(substr) >= 0
# Verifies equal modules for save-load tests.
def assertModulesEqual(case, mod1, mod2, message=None):
for p1, p2 in itertools.zip_longest(mod1.parameters(), mod2.parameters()):
case.assertTrue(p1.equal(p2), message)
def save_and_reload_model(model):
buf = BytesIO()
print("saving ...")
torch.jit.save(model, buf)
print("done")
print("reloading....")
buf.seek(0)
reloaded_model = torch.jit.load(buf)
print("done")
return reloaded_model
class TorchGlowTestCase(unittest.TestCase):
"""
Base class for torch_glow tests that ensure that torch.manual_seed is
called before each test.
    NOTE: this won't affect arguments to the test case, so make sure that test
    cases generate their own inputs to the test network within the test case, not
    outside of it.
"""
def setUp(self):
torch.manual_seed(0)
np.random.seed(0)
print("running the setup for TorchGlowTest")
def deterministic_expand(params):
"""Takes params as a list of lambdas where each lambda produces a tuple of
unique parameters for the test"""
torch.manual_seed(0)
np.random.seed(0)
return parameterized.expand([p() for p in params])
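# Illustrative usage:
#   @deterministic_expand([lambda: (torch.randn(2, 3), True), lambda: (torch.zeros(4), False)])
#   def test_something(self, tensor, flag): ...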
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import pickle
import torch
import torch_glow
from tests import utils
class TestCompilationSpec(utils.TorchGlowTestCase):
    def build_compilation_spec(self):
compilation_spec = torch_glow.CompilationSpec()
compilation_spec_settings = compilation_spec.get_settings()
compilation_spec_settings.set_glow_backend("CPU")
compilation_spec_settings.set_enable_fuser(True)
fuser_settings = compilation_spec.get_fuser_settings()
fuser_settings.set_min_fusion_group_size(3)
fuser_settings.set_max_fusion_merge_size(4)
fuser_settings.set_fusion_start_index(5)
fuser_settings.set_fusion_end_index(6)
fuser_settings.op_blacklist_append("aten::mean")
fuser_settings.op_blacklist_append("aten::dropout")
compilation_group = torch_glow.CompilationGroup()
input1_spec = torch_glow.input_spec_from_tensor(torch.randn(2, 3, 224, 224))
input2_spec = torch_glow.input_spec_from_tensor(
torch.randn(3, 2).to(torch.float16)
)
compilation_group.input_sets_append([input1_spec, input2_spec])
compilation_group.input_sets_append(
torch_glow.input_specs_from_tensors(
[torch.randn(1, 3, 224, 224), torch.randn(4, 1)]
)
)
compilation_group_settings = compilation_group.get_settings()
compilation_group_settings.set_convert_to_fp16(True)
compilation_group_settings.set_num_devices_to_use(50)
compilation_group_settings.set_replication_count(52)
compilation_group_settings.backend_specific_opts_insert("apple", "orange")
compilation_spec.compilation_groups_append(compilation_group)
default_compilation_group_settings = (
compilation_spec.get_default_compilation_group_settings()
)
default_compilation_group_settings.set_convert_to_fp16(False)
default_compilation_group_settings.set_num_devices_to_use(89)
default_compilation_group_settings.set_replication_count(90)
default_compilation_group_settings.backend_specific_opts_insert(
"hello", "goodbye"
)
return compilation_spec
def validate_compilation_spec(self, compilation_spec):
compilation_spec_settings = compilation_spec.get_settings()
self.assertEqual(compilation_spec_settings.get_glow_backend(), "CPU")
self.assertEqual(compilation_spec_settings.get_enable_fuser(), True)
fuser_settings = compilation_spec.get_fuser_settings()
self.assertEqual(fuser_settings.get_min_fusion_group_size(), 3)
self.assertEqual(fuser_settings.get_max_fusion_merge_size(), 4)
self.assertEqual(fuser_settings.get_fusion_start_index(), 5)
self.assertEqual(fuser_settings.get_fusion_end_index(), 6)
self.assertEqual(fuser_settings.get_op_blacklist()[0], "aten::mean")
self.assertEqual(fuser_settings.get_op_blacklist()[1], "aten::dropout")
compilation_groups = compilation_spec.get_compilation_groups()
self.assertEqual(len(compilation_groups), 1)
compilation_group = compilation_groups[0]
input_sets = compilation_group.get_input_sets()
self.assertEqual(len(input_sets), 2)
self.assertEqual(input_sets[0][0].get_dims(), [2, 3, 224, 224])
self.assertEqual(input_sets[0][1].get_dims(), [3, 2])
self.assertEqual(input_sets[1][0].get_dims(), [1, 3, 224, 224])
self.assertEqual(input_sets[1][1].get_dims(), [4, 1])
# 5 is at::Half
self.assertEqual(input_sets[0][1].get_elem_type(), 5)
compilation_group_settings = compilation_group.get_settings()
self.assertEqual(compilation_group_settings.get_convert_to_fp16(), True)
self.assertEqual(compilation_group_settings.get_num_devices_to_use(), 50)
self.assertEqual(compilation_group_settings.get_replication_count(), 52)
self.assertEqual(
compilation_group_settings.backend_specific_opts_at("apple"), "orange"
)
default_compilation_group_settings = (
compilation_spec.get_default_compilation_group_settings()
)
self.assertEqual(
default_compilation_group_settings.get_convert_to_fp16(), False
)
self.assertEqual(
default_compilation_group_settings.get_num_devices_to_use(), 89
)
self.assertEqual(default_compilation_group_settings.get_replication_count(), 90)
self.assertEqual(
default_compilation_group_settings.backend_specific_opts_at("hello"),
"goodbye",
)
def test_new_glow_compile_spec(self):
"""Test glow compile spec basics."""
        compilation_spec = self.build_compilation_spec()
# Sanity check
self.validate_compilation_spec(compilation_spec)
# Serialize and deserialize
pickled = pickle.dumps(compilation_spec)
unpickled = pickle.loads(pickled)
# Recheck the spec
self.validate_compilation_spec(unpickled)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import tempfile
import torch
import torch_glow
from tests import utils
class TestLoadBackendSpecificOptions(utils.TorchGlowTestCase):
def test_backend_specific_options(self):
"""Test loading backend specific options from YAML file."""
def test_f(a, b):
return a.add(b)
x = torch.randn(4)
y = torch.randn(4)
# Create YAML file with backend options
with tempfile.NamedTemporaryFile() as options_fd:
options_fd.write(b"interpreter-memory: 4194304\n")
options_fd.flush()
# Run Glow
torch_glow.loadBackendSpecificOptions(options_fd.name)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
glow_trace = torch.jit.trace(test_f, (x, y), check_trace=False)
glow_trace(x, y)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
def get_compilation_spec(inputs):
"""helper function to get the compilation spec of the submodule"""
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(torch_glow.input_specs_from_tensors(inputs))
return spec
class QuantizedModule(torch.nn.Module):
def forward(self, a, b):
return torch.ops.quantized.add(a, b, scale=1.0 / 21, zero_point=10)
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.nn.quantized.Quantize(
scale=1.0 / 21, zero_point=0, dtype=torch.qint8
)
self.dequant = torch.nn.quantized.DeQuantize()
self.add = QuantizedModule()
def forward(self, a, b):
return self.dequant(self.add(self.quant(a), self.quant(b)))
class TestInputSpec(utils.TorchGlowTestCase):
def test_input_spec(self):
"""Test setting quantized and non-quantized input specs."""
with torch.no_grad():
a = torch.tensor([[0.1]])
b = torch.tensor([[0.1]])
mod = TestModule()
traced_model = torch.jit.trace(mod, (a, b))
ref_result = traced_model(a, b)
# test non-quantized input
glow_mod = torch_glow.to_glow(traced_model, get_compilation_spec((a, b)))
glow_result = glow_mod(a, b)
self.assertTrue(torch.allclose(ref_result, glow_result))
# test quantized input
add_inputs = torch_glow.get_submod_inputs(mod, "add", (a, b))
glow_mod = torch_glow.to_glow_selective(
traced_model, {"add": get_compilation_spec(add_inputs)}
)
glow_result = glow_mod(a, b)
self.assertTrue(torch.allclose(ref_result, glow_result))
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Qux(torch.nn.Module):
def __init__(self, x):
super(Qux, self).__init__()
self.x = x
def forward(self, a, b):
return a - b - self.x
class Baz(torch.nn.Module):
def __init__(self, x):
super(Baz, self).__init__()
self.x = x
def forward(self, a, b):
return a + b * self.x
class Bar(torch.nn.Module):
def __init__(self, x):
super(Bar, self).__init__()
self.x = x
def forward(self, a, b):
return a * b + self.x
class Foo(torch.nn.Module):
def __init__(self, bar, baz):
super(Foo, self).__init__()
self.bar = bar
self.baz = baz
def forward(self, a, b):
return self.baz(self.bar(a.reshape(1, -1), b.reshape(1, -1)), b)
class Model(torch.nn.Module):
def __init__(self, foo, qux):
super(Model, self).__init__()
self.foo = foo
self.qux = qux
def forward(self, a, b):
return self.qux(self.foo(a, b), a)
r"""
model
/ \
foo qux (Glow)
/ \
bar (Glow) baz
"""
bar = Bar(4.0)
baz = Baz(2.0)
qux = Qux(3.0)
foo = Foo(bar, baz)
model = Model(foo, qux)
def get_compilation_spec(inputs):
"""helper function to get the compilation spec of the submodule"""
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(torch_glow.input_specs_from_tensors(inputs))
return spec
class TestSelectiveToGlow(utils.TorchGlowTestCase):
def test_to_glow_selective(self):
inputs = (torch.zeros(4) + 8, torch.zeros(4) + 7)
torch_res = model(*inputs)
bar_inputs = torch_glow.get_submod_inputs(model, "foo.bar", inputs)
qux_inputs = torch_glow.get_submod_inputs(model, "qux", inputs)
glow_mod = torch_glow.to_glow_selective(
model,
{
"foo.bar": (get_compilation_spec(bar_inputs), bar_inputs),
"qux": (get_compilation_spec(qux_inputs), qux_inputs),
},
inplace=False,
)
glow_mod = torch.jit.trace(glow_mod, inputs)
glow_res = glow_mod(*inputs)
assert torch.allclose(torch_res, glow_res)
def test_to_glow_selective_already_scripted(self):
inputs = (torch.zeros(4) + 8, torch.zeros(4) + 7)
torch_res = model(*inputs)
bar_inputs = torch_glow.get_submod_inputs(model, "foo.bar", inputs)
qux_inputs = torch_glow.get_submod_inputs(model, "qux", inputs)
with torch.no_grad():
traced_model = torch.jit.trace(model, inputs)
glow_mod = torch_glow.to_glow_selective(
traced_model,
{
"foo.bar": get_compilation_spec(bar_inputs),
"qux": get_compilation_spec(qux_inputs),
},
inplace=False,
)
glow_res = glow_mod(*inputs)
assert torch.allclose(torch_res, glow_res)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestOnlyTensorOutputs(utils.TorchGlowTestCase):
def test_only_tensor_outputs(self):
"""Test that Glow fuser only produces tensor outputs."""
def f(a, b):
x = (a + b).size(0)
c = a.reshape(x, -1)
return a + c
torch_glow.disableFusionPass()
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
# By creating a graph with an aten::size (supported) feeding into an
# unsupported op (prim::ListConstruct), we see that even if an op is
# supported, if it produces a non-tensor output to the fusion group it
# would not be fused.
torch_glow.glowCustomFuseDebug_(
jit_f_graph, ["prim::Constant", "aten::add", "aten::size", "aten::reshape"]
)
fusion_nodes = 0
aten_sizes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
if node.kind() == "aten::size":
aten_sizes += 1
assert (
fusion_nodes == 2
), "Expected two fusion nodes to be split up with aten::size between them"
assert aten_sizes == 1, "Expected aten::size not to be fused"
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
import torch_glow
from tests import utils
class TestJITVsGlowPath(utils.TorchGlowTestCase):
def test_jit_vs_glow_path(self):
"""Basic test of the JIT vs. Glow logging feature."""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, input, weight):
return F.linear((input + input), weight)
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
TestModule(),
input,
weight,
fusible_ops={"aten::add", "aten::linear"},
)
def test_jit_vs_glow_int_path(self):
"""Test JIT vs. Glow logging with int type"""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return c
a = torch.randn(5, 6).to(dtype=torch.int32)
b = torch.randn(5, 6).to(dtype=torch.int32)
utils.compare_tracing_methods(TestModule(), a, b, fusible_ops={"aten::add"})
def test_jit_vs_glow_inplace(self):
"""Test JIT vs. Glow logging with in-place op"""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, a, b):
a += b
return a
a = torch.randn(5, 6)
b = torch.randn(5, 6)
utils.compare_tracing_methods(TestModule(), a, b, fusible_ops={"aten::add_"})
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP, SUBGRAPH_ATTR
class TestBlockList(utils.TorchGlowTestCase):
def test_op_blocklist(self):
"""Test Glow fuser op kind blacklisting mechanism."""
def f(a, b):
return (a + b) * (a - b)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionBlocklist(["aten::add"])
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
fused_add = False
fused_sub = False
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
glow_subgraph = node.g(SUBGRAPH_ATTR)
for node in glow_subgraph.nodes():
if node.kind() == "aten::add":
fused_add = True
if node.kind() == "aten::sub":
fused_sub = True
assert not fused_add, "Expected aten::add to be blocklisted"
assert fused_sub, "Expected aten::sub not to be blocklisted"
torch_glow.clearFusionBlocklist()
def test_op_index_blocklist(self):
"""Test Glow fuser index blacklisting mechanism."""
def f(a, b):
x1 = a * b
x2 = x1 * b
x3 = x2 * a
x4 = x3 / b
x5 = x4 / a
x6 = x5 / b
x7 = x6 * a
x8 = x7 * b
return x8
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionStartIndex(3)
torch_glow.setFusionEndIndex(6)
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
torch_glow.clearFusionIndices()
fused_muls = 0
fused_divs = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
glow_subgraph = node.g(SUBGRAPH_ATTR)
for node in glow_subgraph.nodes():
if node.kind() == "aten::mul":
fused_muls += 1
if node.kind() == "aten::div":
fused_divs += 1
assert fused_muls == 0, "Expected no aten::muls to be fused"
assert fused_divs == 3, "Expected all 3 aten::divs to be fused"
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import graph_contains_str
graph_str = """
graph(%input : Tensor, %weight : Tensor, %bias : Tensor):
%c : int = prim::Constant[value=4]()
%d : int = prim::Constant[value=1]()
%1 : int = aten::dim(%input)
%2 : bool = aten::eq(%1, %c)
%3 : Tensor = prim::If(%2)
block0():
%4 : Tensor = aten::t(%weight)
%5 : int = prim::Constant[value=1]()
%6 : Tensor = aten::mm(%input, %4)
%7 : Tensor = aten::add(%bias, %6, %5)
-> (%7)
block1():
%8 : Tensor = aten::t(%weight)
%9 : Tensor = aten::matmul(%input, %8)
%10 : Tensor = aten::add_(%9, %bias, %d)
-> (%10)
return (%3)
"""
class TestFuseLinear(utils.TorchGlowTestCase):
def test_fuse_linear(self):
"""Test Glow's fuseBranchedLinearPattern JIT pass"""
graph = torch._C.parse_ir(graph_str)
assert not graph_contains_str(graph, "glow::fused_linear")
torch_glow.fuseBranchedLinearPattern_(graph)
assert graph_contains_str(graph, "glow::fused_linear")
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
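# The helper below sketches the typical to_glow flow used throughout these
# tests: build a CompilationSpec targeting the Interpreter backend, attach a
# CompilationGroup whose InputSpec mirrors the example input, toggle constant
# randomization, lower the traced module with torch_glow.to_glow, and run the
# lowered module on the same input.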
def run_model(m, input, randomize):
torch_glow.disableFusionPass()
traced_m = torch.jit.trace(m, input)
if randomize:
torch_glow.enable_randomize_constants()
else:
torch_glow.disable_randomize_constants()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
compilation_group.input_sets_append([input_spec])
glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
return glow_m(input)
class TestRandomizeWeights(utils.TorchGlowTestCase):
def test_randomize_weights(self):
m = Model()
input = torch.randn(5)
normal1 = run_model(m, input, False)
normal2 = run_model(m, input, False)
rand = run_model(m, input, True)
assert torch.allclose(normal1, normal2)
assert not torch.allclose(normal1, rand)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import torch
import torch_glow
from tests import utils
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 3)
self.relu = torch.nn.ReLU()
self.conv2 = torch.nn.Conv2d(6, 16, 3)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
y = self.conv2(x)
return y
class Bar(torch.nn.Module):
def __init__(self, foo):
super(Bar, self).__init__()
self.foo = foo
def forward(self, x):
y = self.foo(x)
return y
class Baz(torch.nn.Module):
def __init__(self, foo):
super(Baz, self).__init__()
self.foo = foo
def forward(self, x):
y = self.foo(x)
return (x, y)
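# create_model below performs standard post-training static quantization on
# Foo: wrap it in QuantWrapper, attach the default "fbgemm" qconfig, prepare,
# calibrate with a single forward pass on x, convert in place, and finally
# wrap the quantized module in the given ModType (Bar or Baz).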
def create_model(x, ModType):
foo = Foo()
foo = torch.ao.quantization.QuantWrapper(foo)
foo.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(foo, inplace=True)
foo(x)
torch.ao.quantization.convert(foo, inplace=True)
model = ModType(foo)
return model
class TestToGlowWriteToOnnx(utils.TorchGlowTestCase):
def lower_and_write_to_onnx_helper(self, ModType, onnx_prefix):
x = torch.randn(1, 3, 8, 8)
model = create_model(x, ModType)
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(x)
compilation_group.input_sets_append([input_spec])
scripted_mod = torch.jit.trace(model, x)
torch_glow.enable_write_to_onnx()
torch_glow.set_onnx_file_name_prefix(onnx_prefix)
torch_glow.enable_write_without_randomize()
lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})
# Run Glow model
g = lowered_model(x)
# Run reference model
t = model(x)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
assert os.path.exists(onnx_prefix + ".onnxtxt")
onnx_files = glob.glob(onnx_prefix + "*.onnx*")
for f in onnx_files:
os.remove(f)
def test_lower_and_write_to_onnx_tensor_output(self):
onnx_prefix = "write_to_onnx_test1"
self.lower_and_write_to_onnx_helper(Bar, onnx_prefix)
def test_lower_and_write_to_onnx_tuple_output(self):
onnx_prefix = "write_to_onnx_test2"
self.lower_and_write_to_onnx_helper(Baz, onnx_prefix)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch_glow
from tests import utils
class TestFuseParallelBranches(utils.TorchGlowTestCase):
def test_fuse_parallel_branches_with_fusible_root(self):
r"""Test GlowFuser fusing parallel branches with a common fusible root
          a = add(x, y)
           /         \
b1 = add(a, x)     b2 = add(a, y)
           \         /
      res = TupleConstruct(b1, b2)
This should be fused as
      glow::FusionGroup_0
               |
        TupleConstruct
"""
def test_fuser(x, y):
a = x + y
branch1 = a + x
branch2 = a + y
res = (branch1, branch2)
return res
inputs = (torch.randn(2, 4), torch.randn(2, 4))
traced = torch.jit.trace(test_fuser, inputs)
torch_glow.glowCustomFuseDebug_(traced.graph)
count = 0
for node in traced.graph.nodes():
if node.kind() == "glow::FusionGroup":
count += 1
assert count == 1, f"Expected 1 glow::FusionGroup, found {count}."
# TODO: support fusing parallel branches without a common fusible root correctly
@unittest.skip("Not supported yet")
def test_fuse_parallel_branches_without_fusible_root(self):
r"""Test GlowFuser fusing parallel branches without a common fusible root
x = add(x, x)      y = add(y, y)
      |                  |
b1 = add(x, x)     b2 = add(y, y)
           \         /
      res = TupleConstruct(b1, b2)
This should be fused as
      glow::FusionGroup_0
               |
        TupleConstruct
"""
def test_fuser(x, y):
x = x + x
y = y + y
branch1 = x + x
branch2 = y + y
res = (branch1, branch2)
return res
inputs = (torch.randn(2, 4), torch.randn(2, 4))
traced = torch.jit.trace(test_fuser, inputs)
torch_glow.glowCustomFuseDebug_(traced.graph)
count = 0
for node in traced.graph.nodes():
if node.kind() == "glow::FusionGroup":
count += 1
assert count == 1, f"Expected 1 glow::FusionGroup, found {count}."
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import torch
import torch_glow
from tests import utils
def create_model(x, relu, bias=True):
"""x is an example input, relu is whether or not to include a fused relu."""
with torch.no_grad():
x_size = len(x.size())
conv_op = None
if x_size == 4:
if bias:
conv_op = torch.nn.Conv2d(3, 10, 3)
else:
conv_op = torch.nn.Conv2d(3, 10, 3, bias=False)
elif x_size == 5:
conv_op = torch.nn.Conv3d(3, 10, 3)
else:
print(f"Only 2d and 3d conv supported, got {x_size}d inputs")
exit(1)
conv_op.weight.random_(-1, 1)
if bias:
conv_op.bias.data.random_(-1, 1)
model = None
if relu:
model = torch.nn.Sequential(
OrderedDict([("conv", conv_op), ("relu", torch.nn.ReLU())])
)
model = torch.ao.quantization.fuse_modules(model, [["conv", "relu"]])
else:
model = torch.nn.Sequential(OrderedDict([("conv", conv_op)]))
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
model(x)
torch.ao.quantization.convert(model, inplace=True)
return model
def run_to_glow(m, x):
"""Trace the model m with input x and call to_glow"""
traced_m = torch.jit.trace(m, (x))
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(x)
compilation_group.input_sets_append([input_spec])
lowered_module = torch_glow.to_glow(traced_m, spec)
return lowered_module
class TestConvToGlow(utils.TorchGlowTestCase):
def test_conv2d_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv2d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
def test_conv3d_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv3d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
def test_conv2d_to_glow_empty_bias(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, False, False)
run_to_glow(m, x)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import graph_contains_str
def foo(x):
y = x.dim()
if y == 1:
return x
else:
if x == 2:
return x * 2
else:
raise RuntimeError("hi")
class TestRemoveException(utils.TorchGlowTestCase):
def test_remove_exceptions(self):
"""Test Glow's removeExceptions JIT pass"""
foo_jit = torch.jit.script(foo)
graph = foo_jit.graph
assert graph_contains_str(graph, "prim::RaiseException")
torch_glow.removeExceptions_(graph)
assert not graph_contains_str(graph, "prim::RaiseException")
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestQuantizedCut(utils.TorchGlowTestCase):
def test_quantized_cut(self):
"""Test cut quantized chunk in the middle."""
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
def fun(a, b, c, d):
q = torch.nn.quantized.Quantize(
scale=1.0 / 21, zero_point=0, dtype=torch.quint8
)
dq = torch.nn.quantized.DeQuantize()
a = q(a)
b = q(b)
c = q(c)
d = q(d)
adds = torch.ops.quantized.add(a, b, scale=1.0 / 17, zero_point=5)
adds2 = torch.ops.quantized.add(c, d, scale=1.0 / 14, zero_point=4)
res = torch.ops.quantized.add_relu(
adds, adds2, scale=1.0 / 18, zero_point=6
)
res = torch.ops.quantized.add(res, res, scale=1.0 / 13, zero_point=7)
res = dq(res)
return res
with torch.no_grad():
a = torch.randn([5, 5])
b = torch.randn([5, 5])
c = torch.randn([5, 5])
d = torch.randn([5, 5])
res_torch = fun(a, b, c, d)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
# Cut using blocklist functionality
blocklist = ["quantized::add_relu"]
torch_glow.setFusionBlocklist(blocklist)
torch_glow.setGlowBackend("Interpreter")
traced_model = torch.jit.trace(fun, (a, b, c, d))
for node in traced_model.graph_for(a, b, c, d).nodes():
kind = node.kind()
# Make sure the blocklist is working
assert (
kind == GLOW_FUSION_GROUP
or kind in blocklist
or kind == "prim::Constant"
)
res_glow = traced_model(a, b, c, d)
print(res_torch)
print(res_glow)
assert torch.allclose(res_torch, res_glow)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class SimpleModule(torch.nn.Module):
def __init__(self):
super(SimpleModule, self).__init__()
def forward(self, x):
y = x + x
y = y + 2
return y
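# The test below exercises per-compilation-group device settings: the backend
# is initialized with 6 devices in total, while the group itself is limited to
# 3 devices via set_num_devices_to_use and replicated num_replications times
# via set_replication_count.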
class TestToGlowNumDevicesToUse(utils.TorchGlowTestCase):
def devices_to_use_test_helper(self, input, num_replications):
model = SimpleModule()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
# Init with total number of devices.
torch_glow.setGlowBackendNumDevices(6)
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
compilation_group.input_sets_append([input_spec])
compilation_group_settings = compilation_group.get_settings()
compilation_group_settings.set_num_devices_to_use(3)
compilation_group_settings.set_replication_count(num_replications)
traced_mod = torch.jit.trace(model, input)
lowered_model = torch_glow.to_glow(traced_mod, {"forward": spec})
g = lowered_model(input)
t = model(input)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
def devices_to_use_test(self):
self.devices_to_use_test_helper(input=torch.randn(4), num_replications=2)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
def forward(self, x, y):
return x + y
class TestToGlowMultipleInputSets(utils.TorchGlowTestCase):
def test_to_glow_multiple_groups_and_input_sets(self):
x1 = torch.randn(1, 4)
y1 = torch.randn(2, 4)
x2 = torch.randn(1, 2)
y2 = torch.randn(5, 2)
x3 = torch.randn(7)
y3 = torch.randn(3, 7)
mod = Foo()
scripted_mod = torch.jit.script(mod)
x1_y1_set = torch_glow.input_specs_from_tensors([x1, y1])
x2_y2_set = torch_glow.input_specs_from_tensors([x2, y2])
x3_y3_set = torch_glow.input_specs_from_tensors([x3, y3])
# Create two CompilationGroups: the first contains two input sets
# and the second contains the third input set
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group_1 = torch_glow.CompilationGroup()
compilation_group_2 = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group_1)
spec.compilation_groups_append(compilation_group_2)
compilation_group_1.input_sets_append(x1_y1_set)
compilation_group_1.input_sets_append(x2_y2_set)
compilation_group_2.input_sets_append(x3_y3_set)
lowered_module = torch_glow.to_glow(scripted_mod, spec)
torch_res1 = mod(x1, y1)
torch_res2 = mod(x2, y2)
torch_res3 = mod(x3, y3)
glow_res1 = lowered_module(x1, y1)
glow_res2 = lowered_module(x2, y2)
glow_res3 = lowered_module(x3, y3)
assert torch.allclose(torch_res1, glow_res1)
assert torch.allclose(torch_res2, glow_res2)
assert torch.allclose(torch_res3, glow_res3)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
class LinearModel(torch.nn.Module):
def __init__(self):
super(LinearModel, self).__init__()
self.linear1 = torch.nn.Linear(5, 3)
self.linear2 = torch.nn.Linear(3, 1)
def forward(self, x):
return self.linear2(self.linear1(x))
class ConvModel(torch.nn.Module):
def __init__(self):
super(ConvModel, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 1)
self.conv2 = torch.nn.Conv2d(3, 3, 1)
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(self.conv2(self.conv1(x)))
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = ConvModel()
self.linear = LinearModel()
def forward(self, x):
return self.linear(self.conv(x))
def test_fuse_necessary_getattrs_only():
m = Model()
x = torch.randn(1, 3, 5, 5)
torch_glow.disableFusionPass()
jit_m = torch.jit.trace(m, x)
jit_m_graph = jit_m.graph_for(x)
# don't fuse aten::_convolution nodes
torch_glow.glowCustomFuseDebug_(
jit_m_graph,
["prim::Constant", "prim::GetAttr", "aten::t", "aten::matmul", "aten::add_"],
)
return m(x)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
from tests import utils
class TestSetGlowBackend(utils.TorchGlowTestCase):
def test_set_glow_backend(self):
"""Test setting the Glow backend type"""
backend_name_before = torch_glow.getGlowBackendName()
backend_num_devices_before = torch_glow.getGlowBackendNumDevices()
torch_glow.setGlowBackend("CPU")
torch_glow.setGlowBackendNumDevices(4)
assert torch_glow.getGlowBackendName() == "CPU"
assert torch_glow.getGlowBackendNumDevices() == 4
# reset everything
torch_glow.setGlowBackend(backend_name_before)
torch_glow.setGlowBackendNumDevices(backend_num_devices_before)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestMinGraphSize(utils.TorchGlowTestCase):
def test_min_graph_size(self):
"""Test Glow fuser minimum fusion group size mechanism."""
def f(a, b, c):
return (a * a * a * a) / (b * b * b) / (c * c * c * c * c)
torch_glow.disableFusionPass()
# Disable aten::div so that each group of aten::mul nodes will be forced
# into separate subgraphs
torch_glow.setFusionBlocklist(["aten::div"])
# Set minimum fusion group size to 3 nodes so that the smallest group which
# contains only 2 nodes will not be created
torch_glow.setMinFusionGroupSize(3)
a = torch.randn(5, 5)
b = torch.randn(5, 5)
c = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b, c))
jit_f_graph = jit_f.graph_for(a, b, c)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes == 2, "Expected smallest fusion group to not be created"
torch_glow.clearFusionBlocklist()
torch_glow.setMinFusionGroupSize(0)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.jit
import torch_glow
from tests import utils
# Use a model containing quantized::conv2d to verify that the preprocessed
# module is saved correctly in a lowered module (ops with packed weights like
# this one are rewritten during lowering, and therefore should only be present
# in the original graph).
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
with torch.no_grad():
conv = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
conv.weight.random_(-1, 1)
conv.bias.data.random_(-1, 1)
self.model = torch.ao.quantization.QuantWrapper(conv)
self.model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(self.model, inplace=True)
torch.ao.quantization.convert(self.model, inplace=True)
def forward(self, x):
return self.model(x)
class TestToGlowSavePreprocessedModule(utils.TorchGlowTestCase):
def test_save_preprocessed_module(self):
with torch.no_grad():
x = torch.randn([1, 4, 4, 4], dtype=torch.float32)
model = Bar()
model.eval()
model = torch.jit.trace(model, x)
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(
torch_glow.input_specs_from_tensors([x])
)
torch_glow.disableFusionPass()
torch_glow.enable_convert_to_fp16()
glow_mod = torch_glow.to_glow(model, spec)
reloaded = utils.save_and_reload_model(glow_mod)
wrappername = "__loweredModule__"
attrname = "__processed_module"
wp = getattr(reloaded._c, wrappername)
pp = getattr(wp, attrname)
pt_model = torch.jit._recursive.wrap_cpp_module(pp)
graph = pt_model.graph_for(x)
found = False
for node in graph.nodes():
if node.kind() == "quantized::conv2d":
found = True
assert found
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestMaxFusionMergeSize(utils.TorchGlowTestCase):
def test_max_fusion_merge_size(self):
"""Test Glow fuser maximum fusion merge size mechanism."""
def f(a):
return a * a * a * a * a * a
torch_glow.disableFusionPass()
# Set maximum fusion merge size to 3 nodes so that the
# graph will not fit into 1 node
torch_glow.setMaxFusionMergeSize(3)
a = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes > 1, "Expected more than one fusion group to be created"
torch_glow.setMaxFusionMergeSize(0)
def test_max_fusion_merge_size_zero(self):
"""Test Glow fuser maximum fusion merge size mechanism set to zero."""
def f(a):
return a * a * a * a * a * a
torch_glow.disableFusionPass()
# Set maximum fusion merge size to 0 so that there is
# no limit to fusion
torch_glow.setMaxFusionMergeSize(0)
a = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes == 1, "Expected just one fusion group to be created"
torch_glow.setMaxFusionMergeSize(0)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import torch
import torch_glow
from tests import utils
from tests.utils import assertModulesEqual
class TwoTupleModule(torch.nn.Module):
def __init__(self):
super(TwoTupleModule, self).__init__()
def forward(self, x):
y = 2 * x
return (x, y)
class OneTupleModule(torch.nn.Module):
def __init__(self):
super(OneTupleModule, self).__init__()
def forward(self, x):
y = 2 * x
return (y,)
class TestToGlowTupleOutput(utils.TorchGlowTestCase):
def tuple_test_helper(self, ModType):
input = torch.randn(4)
model = ModType()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
compilation_group.input_sets_append([input_spec])
scripted_mod = torch.jit.script(model)
lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})
# Run Glow model
g = lowered_model(input)
# Run reference model
t = model(input)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
# test module ser/de with tuple output
buffer = io.BytesIO()
torch.jit.save(lowered_model, buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
assertModulesEqual(self, lowered_model, loaded_model)
def test_to_glow_one_tuple_output(self):
self.tuple_test_helper(OneTupleModule)
def test_to_glow_two_tuple_output(self):
self.tuple_test_helper(TwoTupleModule)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
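# These tests exercise two shape-inference entry points:
# torch_glow.glow_shape_inference, which returns a truthy value when shapes
# can be inferred for the whole graph, and
# torch_glow.glow_shape_inference_find_unsupported_symbols, which returns the
# list of operator symbols the shape inference engine cannot handle.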
class TestGlowShapeInference(utils.TorchGlowTestCase):
def test_shape_inference_basics(self):
"""Test Glow shape inference basic usage."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference(
jit_f_graph,
args,
)
assert actual
def test_shape_inference_input_mismatch(self):
"""Test Glow shape inference basic error handling."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
# Input args are empty, but the function expects one input.
# Shape Inference should raise an exception in this case.
args = ()
self.assertRaises(
Exception,
lambda: torch_glow.glow_shape_inference(
jit_f_graph,
args,
),
)
def test_shape_inference_supported_symbols(self):
"""Test Glow shape inference unsupported symbols."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args
)
expected = []
self.assertEqual(set(expected), set(actual))
def test_shape_inference_unsupported_symbols(self):
"""Test Glow shape inference unsupported symbols."""
def f(a):
# linalg.multi_dot is currently not supported by shape inference engine
return torch.matrix_power(torch.linalg.multi_dot([a * 3, a + 4]), 3)
a = torch.randn(3, 3)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args
)
expected = ["aten::linalg_multi_dot", "aten::linalg_matrix_power"]
self.assertEqual(set(expected), set(actual))
blocklist = ["aten::linalg_multi_dot"]
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, blocklist
)
expected = ["aten::linalg_matrix_power"]
self.assertEqual(set(expected), set(actual))
def test_shape_inference_unsupported_symbols_skip_fusion_group(self):
"""Test Glow shape inference unsupported symbols including skipping of
symbols after a secondary fusion group."""
def f(a, b):
x1 = a * b
x2 = x1 * b
x3 = x2 * a
x4 = x3 / b
x5 = x4 / a
x6 = x5 / b
x7 = x6 * a
x8 = x7 * b
return x8 * torch.linalg.multi_dot([x8, x8])
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionStartIndex(3)
torch_glow.setFusionEndIndex(6)
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
torch_glow.clearFusionIndices()
args = (a, b)
# Don't skip nodes after the last fusion node.
# In this case, one of the nodes (linalg.multi_dot) following the last fusion node
# is not supported, and should be reported.
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, skip_last_fusion_node=False
)
expected = [
"aten::linalg_multi_dot",
]
self.assertEqual(set(expected), set(actual))
# DO skip nodes after the last fusion node.
# In this case, one of the nodes (linalg.multi_dot) following the last fusion node
# is not supported, but is suppressed due to the skip_last_fusion_node flag.
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, skip_last_fusion_node=True
)
expected = []
self.assertEqual(set(expected), set(actual))
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class TestPrintJitNodeIndices(utils.TorchGlowTestCase):
"""Test printing PyTorch jit node indices."""
def test_print_jit_indices(self):
def test_f(a, b):
c = a.add(b)
return c.add(c)
x = torch.randn(4)
y = torch.randn(4)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.enable_printing_jit_node_indices()
graph = torch.jit.trace(test_f, (x, y), check_trace=False)
graph(x, y)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSizeModel(torch.nn.Module):
def __init__(self, dimension):
super(SimpleSizeModel, self).__init__()
self.dimension = dimension
def forward(self, tensor):
return tensor.size(self.dimension)
class TestSize(utils.TorchGlowTestCase):
# Need to be able to export lists from Glow fused nodes
# Commented out the test cases below to avoid triggering internal CI
# @unittest.skip(reason="not ready")
# def test_size_basic(self):
# """Test of the PyTorch aten::size Node on Glow."""
# def test_f(a):
# b = a + a.size(0)
# return b
# x = torch.zeros([4], dtype=torch.int32)
# utils.compare_tracing_methods(test_f, x, fusible_ops={"aten::size"})
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleSizeModel(-1),
torch.randn(2, 3, 4, dtype=torch.float32),
)
]
)
def test_size(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::size"})
@utils.deterministic_expand(
[
lambda: (
"oob",
SimpleSizeModel(-4),
torch.randn(2, 3, 4, dtype=torch.float32),
)
]
)
def test_size_failure(self, _, module, tensor):
with self.assertRaises(IndexError):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::size"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class TestFullLike(utils.TorchGlowTestCase):
def test_empty_like_basic(self):
"""Basic test of the PyTorch empty_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a, dtype=torch.float)
c = torch.zeros_like(a, dtype=torch.float)
return a + (b * c)
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_empty_like_no_assign_type(self):
"""Basic test of the PyTorch empty_like Node on Glow without assigning type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a)
c = torch.zeros_like(a)
return a + (b * c)
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_empty_like_int(self):
"""Basic test of the PyTorch empty_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a, dtype=torch.int)
c = torch.zeros_like(a, dtype=torch.int)
return b * c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_full_like_basic(self):
"""Basic test of the PyTorch full_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=3.1415, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_full_like_no_assign_type(self):
"""Basic test of the PyTorch full_like Node on Glow without assigning type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=3.1415)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_full_like_int(self):
"""Basic test of the PyTorch full_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=4, dtype=torch.int)
c = torch.full_like(a, fill_value=5, dtype=torch.int)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_zeros_like_basic(self):
"""Basic test of the PyTorch zeros_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_zeros_like_no_assign_type(self):
"""Basic test of the PyTorch zeros_like Node on Glow without assign type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_zeros_like_int(self):
"""Basic test of the PyTorch zeros_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a, dtype=torch.int)
c = torch.zeros_like(b)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_ones_like_basic(self):
"""Basic test of the PyTorch ones_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
def test_ones_like_no_assign_type(self):
"""Basic test of the PyTorch ones_like Node on Glow without assign type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
def test_ones_like_int(self):
"""Basic test of the PyTorch ones_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a, dtype=torch.int)
c = torch.ones_like(b, dtype=torch.int)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class SimpleQuantizedLinearModel(torch.nn.Sequential):
def __init__(
self,
in_features,
out_features,
quantization,
per_tensor,
weight=None,
bias=None,
):
linear = torch.nn.Linear(in_features, out_features, bias=(bias is not None))
if weight:
linear.weight.data.fill_(weight)
else:
linear.weight.data.random_(0, 100)
if bias:
linear.bias.data.fill_(bias)
super(SimpleQuantizedLinearModel, self).__init__(
quantization, linear, torch.nn.quantized.DeQuantize()
)
weight_observer = (
torch.ao.quantization.default_weight_observer
if per_tensor
else torch.ao.quantization.default_per_channel_weight_observer
)
self.qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.default_observer,
weight=weight_observer,
)
torch.ao.quantization.prepare(self, inplace=True)
torch.ao.quantization.convert(self, inplace=True)
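# _make_input builds a deterministic input: a 1-D tensor containing
# range(size), tiled `duplications` times and reshaped to `shape`. For
# example, _make_input(5, 6, [3, 2, 5]) yields the values 0..4 repeated six
# times, viewed as a 3 x 2 x 5 tensor.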
def _make_input(size, duplications, shape, dtype=torch.float):
tensor = torch.tensor(range(size), dtype=dtype)
tensor = torch.cat(tuple(tensor for _ in range(duplications)))
tensor = torch.reshape(tensor, shape)
return tensor
class TestQuantizedLinear(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleQuantizedLinearModel(
5,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
3.0,
),
_make_input(5, 6, [3, 2, 5]),
),
lambda: (
"no_bias",
SimpleQuantizedLinearModel(
5,
3,
torch.nn.quantized.Quantize(
scale=1 / 15, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
),
_make_input(5, 6, [3, 2, 5]),
),
lambda: (
"exclude_dq",
SimpleQuantizedLinearModel(
5,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
3.0,
),
_make_input(5, 6, [3, 2, 5]),
{"aten::dequantize"},
),
lambda: (
"rowwise",
SimpleQuantizedLinearModel(
6,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
),
_make_input(36, 1, [3, 2, 6]),
),
lambda: (
"tensorwise",
SimpleQuantizedLinearModel(
6,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
True, # per_tensor
),
_make_input(36, 1, [3, 2, 6]),
),
]
)
def test_quantized_linear(self, _, model, tensor, fusion_blocklist=None):
fusible_ops = {
"aten::quantize_per_tensor",
"quantized::linear",
"aten::dequantize",
}
fusible_ops -= fusion_blocklist or set()
utils.compare_tracing_methods(
model, tensor, fusible_ops=fusible_ops, fusion_blocklist=fusion_blocklist
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import torch
from tests import utils
class TestQuantizedConv2dRelu(utils.TorchGlowTestCase):
def _test_quantized_conv2d_relu_packed(self, groups):
"""Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float) / 3
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=groups)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
# Due to the off-by-one error, we cannot let the weights, bias, and input
# be totally random.
conv.weight.set_(
torch.arange(36 / groups, dtype=torch.float).reshape(
[3, 3 // groups, 2, 2]
)
/ 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d_relu",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_conv2d_relu_packed_groupwise(self):
"""PyTorch groupwise quantized::conv2d_relu Node with packed weights on Glow."""
self._test_quantized_conv2d_relu_packed(groups=3)
def test_quantized_conv2d_relu_packed_nongroupwise(self):
"""PyTorch vanilla quantized::conv2d_relu Node with packed weights on Glow."""
self._test_quantized_conv2d_relu_packed(groups=1)
def test_quantized_conv2d_relu_packed_cut_q_dq(self):
"""Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow, with quantize and dequantize excluded."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float) / 3
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=1)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
# Due to the off-by-one error, we cannot let the weights, bias, and input
# be totally random.
conv.weight.set_(
torch.arange(36, dtype=torch.float).reshape([3, 3, 2, 2]) / 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={"quantized::conv2d_relu"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedAddReluModule(torch.nn.Module):
def __init__(self, left_quantization, right_quantization, scale, zero_point):
super(SimpleQuantizedAddReluModule, self).__init__()
self.left_quantization = left_quantization
self.right_quantization = right_quantization
self.scale = scale
self.zero_point = zero_point
def forward(self, left, right):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.add_relu(
self.left_quantization(left),
self.right_quantization(right),
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedAddRelu(utils.TorchGlowTestCase):
def test_quantized_add_relu_zerooffset(self):
"""Basic test of the PyTorch quantized::add Node_relu on Glow with zero offset."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
0.05,
0,
),
torch.tensor([1, 2, 3, 4], dtype=torch.float32),
torch.tensor([5, 6, 7, 8], dtype=torch.float32),
skip_to_glow=True,
)
def test_quantized_add_relu(self):
"""Basic test of the PyTorch quantized::add_relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.rand([5, 5]),
torch.rand([5, 5]),
skip_to_glow=True,
)
def test_quantized_add_relu_cut_q_dq(self):
"""Basic test of the PyTorch quantized::add_relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.rand([5, 5]),
torch.rand([5, 5]),
fusible_ops={"quantized::add_relu"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSubtractModel(torch.nn.Module):
def __init__(self):
super(SimpleSubtractModel, self).__init__()
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).sub(b.item())
else:
c = a.sub(b)
return c.sub(c)
class TestSub(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleSubtractModel(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast_1",
SimpleSubtractModel(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_2",
SimpleSubtractModel(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast_3",
SimpleSubtractModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleSubtractModel(), torch.randn(4), torch.tensor(3.9)),
lambda: (
"int",
SimpleSubtractModel(),
torch.randn(4),
torch.tensor(20),
),
lambda: (
"int64",
SimpleSubtractModel(),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_subtract(self, _, module, tensor, other):
utils.run_comparison_tests(module, (tensor, other), fusible_ops={"aten::sub"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFloorDivideModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleFloorDivideModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
b = b.item()
if self.inplace:
return (a + a).floor_divide_(b)
else:
return (a + a).floor_divide(b)
class TestFloorDiv(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
lambda: (
"inplace",
SimpleFloorDivideModule(True),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
lambda: (
"positive_float",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.tensor(3.9),
),
lambda: (
"negative_float",
SimpleFloorDivideModule(),
torch.tensor([-4.0]),
torch.tensor([3.0]),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(4, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(1, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(4, 2).random_(0, 5),
torch.Tensor(8, 3, 4, 2).random_(1, 5),
),
lambda: (
"positive_int",
SimpleFloorDivideModule(),
torch.tensor([5]),
torch.tensor([4]),
),
lambda: (
"negative_int",
SimpleFloorDivideModule(),
torch.tensor([-5]),
torch.tensor([4]),
),
lambda: (
"int64",
SimpleFloorDivideModule(),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_floor_div(self, _, module, left, right):
utils.run_comparison_tests(
module,
(left, right),
fusible_ops={"aten::floor_divide"},
skip_for_backends="NNPI",
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSliceModel(torch.nn.Module):
def __init__(self, start, end):
super(SimpleSliceModel, self).__init__()
self.start = start
self.end = end
def forward(self, x):
x = x + x
if self.start is None and self.end is None:
return x[:]
elif self.start is None:
return x[: self.end]
elif self.end is None:
return x[self.start :]
else:
return x[self.start : self.end]
class TestSlice(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (0, 1),
lambda: (0, 2),
lambda: (0, 3),
lambda: (1, 2),
lambda: (1, 3),
lambda: (2, 3),
lambda: (-3, 1),
lambda: (-2, 2),
lambda: (-1, 3),
lambda: (-2, -1),
lambda: (0, -1),
lambda: (1, -1),
lambda: (None, 2),
lambda: (None, -1),
lambda: (0, None),
lambda: (-2, None),
lambda: (None, None),
]
)
def test_slice(self, start, end):
"""Test of the PyTorch slice Node on Glow."""
input = torch.rand(3, 2, 2)
utils.compare_tracing_methods(
SimpleSliceModel(start, end), input, fusible_ops={"aten::slice"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import Optional
import torch
from tests import utils
class SimpleDivModule(torch.nn.Module):
def __init__(self, rounding_mode: Optional[str] = None):
super(SimpleDivModule, self).__init__()
self.rounding_mode = rounding_mode
def forward(self, a, b):
rounding_mode = self.rounding_mode
if True:  # until the 3rd arg is implemented; then this should be: rounding_mode is None
if b.size() == torch.Size([]):
return (a * a).div(b.item())
else:
c = a.div(b)
return c.div(c)
else:
if b.size() == torch.Size([]):
return (a * a).div(b.item(), rounding_mode=rounding_mode)
else:
c = a.div(b, rounding_mode=rounding_mode)
return c.div(c, rounding_mode=rounding_mode)
class TestDiv(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleDivModule(), torch.randn(4), torch.randn(4)),
lambda: (
"basic_rm_true",
SimpleDivModule(rounding_mode="true"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"basic_rm_trunc",
SimpleDivModule(rounding_mode="trunc"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"basic_rm_floor",
SimpleDivModule(rounding_mode="floor"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_true",
SimpleDivModule(rounding_mode="true"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_trunc",
SimpleDivModule(rounding_mode="trunc"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_floor",
SimpleDivModule(rounding_mode="floor"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: (
"float_tensor",
SimpleDivModule(),
torch.randn(4),
torch.tensor(3.9),
),
lambda: (
"int_tensor",
SimpleDivModule(),
torch.tensor([4]),
torch.tensor([10]),
{"NNPI"}, # skip_for_backends
),
# This one will go through (a * a) / b.item() and b.item() is an integer.
lambda: (
"int_number",
SimpleDivModule(),
torch.tensor([4]),
torch.tensor(10),
{"NNPI"}, # skip_for_backends
),
lambda: (
"int64",
SimpleDivModule(),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
{"NNPI"}, # skip_for_backends
),
]
)
def test_div(self, _, module, a, b, skip_for_backends={}):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::div"},
skip_for_backends=skip_for_backends,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTypeasModel(torch.nn.Module):
def __init__(self):
super(SimpleTypeasModel, self).__init__()
def forward(self, tensor, other=None):
# TODO: Understand and document the utility of the self-conversion test
# as well as the additional tensor + tensor step
other = tensor if other is None else other
if tensor.dtype != torch.bool:
tensor = tensor + tensor
typed = tensor.type_as(other)
return typed + typed
class TestTypeAs(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"to_int32",
SimpleTypeasModel(),
torch.randn(4),
torch.zeros(4, dtype=torch.int32),
),
lambda: (
"from_int32",
SimpleTypeasModel(),
torch.randn(4).to(dtype=torch.int32),
torch.zeros(4),
),
lambda: (
"from_bool",
SimpleTypeasModel(),
torch.randn(4).to(dtype=torch.bool),
torch.zeros(4),
),
lambda: ("self", SimpleTypeasModel(), torch.randn(4), None, False),
lambda: (
"f2f2",
SimpleTypeasModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
False,
),
lambda: (
"f2i2",
SimpleTypeasModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2).to(dtype=torch.int32),
),
]
)
def test_typeas(self, _, module, tensor, other=None, should_fuse=True):
if other is not None:
utils.compare_tracing_methods(
module,
tensor,
other,
fusible_ops={"aten::type_as"} if should_fuse else {},
)
else:
utils.compare_tracing_methods(
module, tensor, fusible_ops={"aten::type_as"} if should_fuse else {}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class CloneModel(torch.nn.Module):
def __init__(self, memory_format=torch.contiguous_format):
super(CloneModel, self).__init__()
self.memory_format = memory_format
def forward(self, a):
b = a.clone(memory_format=self.memory_format)
return b + a
class TestClone(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1x3", [1, 3]),
lambda: ("8x3x5", [8, 3, 5]),
]
)
def test_clone(self, _, tensor_shape):
"""Test of the PyTorch clone method on Glow."""
utils.compare_tracing_methods(
CloneModel(),
torch.randn(tensor_shape),
fusible_ops={"aten::clone"},
)
@utils.deterministic_expand(
[
lambda: ("8x3x5x10", [8, 3, 5, 10]),
]
)
def test_clone_alt_memory_format(self, _, tensor_shape):
"""Test of the PyTorch clone method on Glow."""
utils.compare_tracing_methods(
CloneModel(memory_format=torch.channels_last),
torch.randn(tensor_shape),
fusible_ops={"aten::clone"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAbsModule(torch.nn.Module):
def __init__(self):
super(SimpleAbsModule, self).__init__()
def forward(self, a):
return torch.abs(a + a)
class TestAbs(utils.TorchGlowTestCase):
def test_abs_basic(self):
"""Basic test of the PyTorch Abs Node on Glow."""
x = torch.randn(10)
utils.run_comparison_tests(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
def test_abs_3d(self):
"""Test multidimensional tensor for the PyTorch Abs Node on Glow."""
x = torch.randn(2, 3, 5)
utils.run_comparison_tests(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleAddModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
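        # A zero-dimensional `b` is unwrapped with item() so the scalar overload of add is exercised.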
if b.size() == torch.Size([]):
return (a * a).add(b.item())
if self.inplace:
c = a.add_(b)
return c.add_(c)
else:
c = a.add(b)
return c.add(c)
class TestAdd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleAddModule(), torch.randn(4), torch.randn(4)),
lambda: ("inplace", SimpleAddModule(True), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),
lambda: (
"float_and_int",
SimpleAddModule(),
torch.randn(4),
torch.tensor(42),
True,
),
lambda: (
"int32",
SimpleAddModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int32),
                torch.randint(-10, 10, (2, 4), dtype=torch.int32),
),
lambda: (
"int64",
SimpleAddModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_add(self, _, module, a, b, skip_to_glow=False):
        utils.run_comparison_tests(
            module,
            (a, b),
            fusible_ops={"aten::add_"} if module.inplace else {"aten::add"},
            skip_to_glow=skip_to_glow,
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleExpModule(torch.nn.Module):
def forward(self, input):
other = torch.exp(input)
return torch.exp(other)
class TestExp(utils.TorchGlowTestCase):
def test_exp_basic(self):
"""Test of the PyTorch exp Node on Glow."""
utils.compare_tracing_methods(
SimpleExpModule(), torch.randn(4), fusible_ops={"aten::exp"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool1dModule(torch.nn.Module):
def __init__(self, kernel_size, stride=None, padding=0):
super(SimpleAvgPool1dModule, self).__init__()
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
def forward(self, inputs):
return F.avg_pool1d(
inputs, self.kernel_size, padding=self.padding, stride=self.stride
)
class TestAvgPool1d(utils.TorchGlowTestCase):
def test_avg_pool1d_basic(self):
"""Basic test of the PyTorch avg_pool1d Node on Glow."""
inputs = torch.randn(1, 4, 9)
utils.compare_tracing_methods(
SimpleAvgPool1dModule(3), inputs, fusible_ops={"aten::avg_pool1d"}
)
def test_avg_pool1d_with_args(self):
"""Test of the PyTorch avg_pool1d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10)
utils.compare_tracing_methods(
SimpleAvgPool1dModule(3, stride=7), inputs, fusible_ops={"aten::avg_pool1d"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleIntModule(torch.nn.Module):
def __init__(self, dtype):
super(SimpleIntModule, self).__init__()
# This has to be done in the init block, because control flow statements in the
# forward method won't be fused during scripting.
if dtype == torch.int32:
self.forward = self._int32_forward
else:
self.forward = self._int64_forward
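    # Both forwards turn tensor sizes into tensors, combine them, and round-trip
    # through aten::Int / prim::NumToTensor so those ops appear in the scripted graph.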
def _int32_forward(self, a):
b = a.size(0)
c = a.size(1)
bt = torch.ops.prim.NumToTensor(b)
ct = torch.ops.prim.NumToTensor(c)
d = bt + ct
d = d.to(torch.int32)
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
def _int64_forward(self, a):
b = a.size(0)
c = a.size(1)
bt = torch.ops.prim.NumToTensor(b)
ct = torch.ops.prim.NumToTensor(c)
d = bt * ct
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
class SimpleIntModuleEmptyShape(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a):
d = torch._shape_as_tensor(a)[0] # tensor with empty shape
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
class TestInt(utils.TorchGlowTestCase):
def test_Int(self):
"""Basic test of the PyTorch Int Node on Glow, along with constant
propagation. Using int32 dtype, and aten::add."""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModule(torch.int32), x, fusible_ops={"aten::Int"}, scripted=True
)
def test_Int_mul_long(self):
"""Basic test of the PyTorch Int Node on Glow, along with constant
propagation. Using int64 dtype, and aten::mul"""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModule(torch.int64), x, fusible_ops={"aten::Int"}, scripted=True
)
def test_Int_empty_shape(self):
"""Basic test of the PyTorch Int Node on Glow. Input tensor has empty shape."""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModuleEmptyShape(), x, fusible_ops={"aten::Int"}, scripted=True
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class TestZero(utils.TorchGlowTestCase):
def test_zero_basic(self):
"""Basic test of the PyTorch zero Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros(a.size(), dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import Union
import torch
from tests import utils
class SimpleCompareOpsModule(torch.nn.Module):
def __init__(self, opType):
super(SimpleCompareOpsModule, self).__init__()
self.opType = opType
def forward(self, a, b):
if self.opType == "equal":
return torch.eq(a, b + 0.1)
elif self.opType == "notEqual":
return torch.ne(a, b + 0.1)
elif self.opType == "lessThan":
return torch.lt(a, b + 0.1)
elif self.opType == "lessEqual":
return torch.le(a, b + 0.1)
elif self.opType == "greaterThan":
return torch.gt(a, b + 0.1)
elif self.opType == "greaterEqual":
return torch.ge(a, b + 0.1)
class SimpleScalarVectorCmpModule(torch.nn.Module):
def __init__(self, opType: str, rhsScalar: Union[float, int]):
super().__init__()
self.opType = opType
self.rhsScalar = rhsScalar
def forward(self, a: torch.Tensor) -> torch.Tensor:
if self.opType == "equal":
return a == self.rhsScalar
if self.opType == "greaterEqual":
return a >= self.rhsScalar
if self.opType == "greaterThan":
return a > self.rhsScalar
if self.opType == "lessEqual":
return a <= self.rhsScalar
if self.opType == "lessThan":
return a < self.rhsScalar
if self.opType == "notEqual":
return a != self.rhsScalar
class TestCmp(utils.TorchGlowTestCase):
def test_equal_basic(self):
"""Basic test of the PyTorch Equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("equal"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::eq"},
)
def test_equal_bcast(self):
"""Basic test of the PyTorch Equal Node (broadcast) on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("equal"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::eq"},
)
def test_not_equal(self):
"""Basic test of the PyTorch Not equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("notEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::ne"},
)
def test_not_equal_bcast(self):
"""Basic test of the PyTorch Not equal (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("notEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::ne"},
)
def test_less_than(self):
"""Basic test of the PyTorch Less than Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessThan"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::lt"},
)
def test_less_than_bcast(self):
"""Basic test of the PyTorch Less than (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessThan"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::lt"},
)
def test_less_equal(self):
"""Basic test of the PyTorch less equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::le"},
)
def test_less_equal_bcast(self):
"""Basic test of the PyTorch less equal (Broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::le"},
)
def test_greater_than(self):
"""Basic test of the PyTorch Greater than Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterThan"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::gt"},
)
def test_greater_than_bcast(self):
"""Basic test of the PyTorch Greater than (Broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterThan"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::gt"},
)
def test_greater_equal(self):
"""Basic test of the PyTorch Greater Equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::ge"},
)
def test_greater_equal_bcast(self):
"""Basic test of the PyTorch Greater Equal (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::ge"},
)
@utils.deterministic_expand(
[
lambda: (
"eq_tensor_scalar",
"equal",
"aten::eq",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"gt_tensor_scalar",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"ge_tensor_scalar",
"greaterEqual",
"aten::ge",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"le_tensor_scalar",
"lessEqual",
"aten::le",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"lt_tensor_scalar",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"ne_tensor_scalar",
"notEqual",
"aten::ne",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"eq_tensor_scalar_int64",
"equal",
"aten::eq",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"gt_tensor_scalar_int64",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"ge_tensor_scalar_int64",
"greaterEqual",
"aten::ge",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"le_tensor_scalar_int64",
"lessEqual",
"aten::le",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"lt_tensor_scalar_int64",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"ne_tensor_scalar_int64",
"notEqual",
"aten::ne",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"eq_tensor_scalar_int32",
"equal",
"aten::eq",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"gt_tensor_scalar_int32",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"lt_tensor_scalar_int32",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"eq_tensor_scalar_float_int",
"equal",
"aten::eq",
torch.randn(3, 4, 5),
5,
),
lambda: (
"gt_tensor_scalar_float_int",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5),
5,
),
lambda: (
"lt_tensor_scalar_float_int",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5),
5,
),
]
)
def test_scalar_vector_cmp(self, _, opType, op, lhsTensor, rhsScalar):
"""Testing comparisons between tensors and scalars."""
utils.compare_tracing_methods(
SimpleScalarVectorCmpModule(opType, rhsScalar),
lhsTensor,
fusible_ops={op},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SelectModule(torch.nn.Module):
def __init__(self, indices, axis, rank):
super(SelectModule, self).__init__()
self.indices = indices
self.axis = axis
self.rank = rank
def forward(self, a):
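        # Index into (a + a); which dimensions get selected depends on the
        # configured rank, axis, and the number of indices supplied.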
if self.rank == 2:
if self.axis == 0:
return (a + a)[self.indices[0], :]
elif self.axis == 1:
return (a + a)[:, self.indices[0]]
else:
return (a + a)[self.indices[0], self.indices[1]]
elif self.rank == 3:
if self.axis == 0:
if len(self.indices) == 1:
return (a + a)[self.indices[0], :, :]
else:
return (a + a)[self.indices[0], self.indices[1], :]
elif self.axis == 1:
if len(self.indices) == 1:
return (a + a)[:, :, self.indices[0]]
else:
return (a + a)[:, self.indices[0], self.indices[1]]
else:
if len(self.indices) == 2:
return (a + a)[self.indices[0], :, self.indices[1]]
else:
return (a + a)[self.indices[0], self.indices[1], self.indices[2]]
class TestComplexSelect(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("2d_axis_0", SelectModule([1], 0, 2), torch.rand(2, 3)),
lambda: ("2d_axis_1", SelectModule([2], 1, 2), torch.rand(2, 3)),
lambda: ("2d_axis_0_1", SelectModule([0, 1], 2, 2), torch.rand(2, 3)),
lambda: ("3d_axis_0", SelectModule([0], 0, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_0_1", SelectModule([2, 1], 0, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_1", SelectModule([0], 1, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_1_2", SelectModule([2, 1], 1, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_0_2", SelectModule([1, 3], 2, 3), torch.rand(3, 4, 5)),
lambda: (
"3d_axis_0_1_2",
SelectModule([2, 0, 4], 1, 3),
torch.rand(3, 4, 5),
),
]
)
def test_f(self, _, module, tensor):
"""Test multidimensional tensors in the PyTorch Select Node on Glow."""
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::select"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class ExpandModel(torch.nn.Module):
def __init__(self, shape):
super(ExpandModel, self).__init__()
self.shape = shape
def forward(self, a):
return a.expand(self.shape)
class TestExpand(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"unit_vector",
ExpandModel([3]),
torch.randn(1),
),
lambda: (
"unit_matrix",
ExpandModel([3, 4]),
torch.randn(1, 1),
),
lambda: (
"singleton_matrix",
ExpandModel([2, 4]),
torch.randn(2, 1),
),
lambda: (
"singleton_matrix_minus_one",
ExpandModel([-1, 4]),
torch.randn(2, 1),
),
lambda: (
"fourD",
ExpandModel([2, 4, 5, 8]),
torch.randn(2, 1, 5, 8),
),
lambda: (
"fourD_two_singleton",
ExpandModel([2, 4, 5, 8]),
torch.randn(2, 1, 5, 1),
),
lambda: (
"fourD_minus_ones",
ExpandModel([2, 4, -1, -1]),
torch.randn(2, 1, 5, 8),
),
lambda: (
"add_dim",
ExpandModel([3, 4, 2]),
torch.randn(4, 2),
),
lambda: (
"add_two_dims",
ExpandModel([8, 3, 4, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dim_minus_one",
ExpandModel([3, -1, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dim_minus_ones",
ExpandModel([3, -1, -1]),
torch.randn(4, 2),
),
lambda: (
"add_dims_minus_one",
ExpandModel([8, 3, -1, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dims_minus_ones",
ExpandModel([8, 3, -1, -1]),
torch.randn(4, 2),
),
]
)
def test_expand(self, _, module, a):
"""Test of the PyTorch expand Node on Glow."""
utils.compare_tracing_methods(
module,
a,
fusible_ops={"aten::expand"},
)
class TestExpandError(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"no_singleton",
ExpandModel([3, 3]),
torch.randn(2, 2),
),
lambda: (
"shape_too_small",
ExpandModel([3]),
torch.randn(2, 2),
),
lambda: (
"invalid_zero",
ExpandModel([0, 3]),
torch.randn(1, 2),
),
lambda: (
"invalid_negative",
ExpandModel([-2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_m1",
ExpandModel([-1, 2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_zero",
ExpandModel([0, 2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_m2",
ExpandModel([-2, 2, 3]),
torch.randn(1, 2),
),
]
)
def test_expand_error(self, _, module, a):
"""Test of the PyTorch expand Node on Glow."""
utils.compare_tracing_methods_error(
module,
a,
fusible_ops={"aten::expand"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimplePreluModule(torch.nn.Module):
def __init__(self):
super(SimplePreluModule, self).__init__()
def forward(self, inputs, weights):
return F.prelu(inputs + inputs, weights)
class TestPrelu(utils.TorchGlowTestCase):
def test_prelu_basic(self):
"""Basic test of the PyTorch prelu Node on Glow."""
utils.compare_tracing_methods(
SimplePreluModule(),
torch.randn(1, 4, 5, 5),
torch.tensor([0.25]),
fusible_ops={"aten::prelu"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAndModule(torch.nn.Module):
def __init__(self):
super(SimpleAndModule, self).__init__()
def forward(self, a, b):
c = a & b
return torch.logical_or(c, b)
class TestAnd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_and(self, _, a, b, skip_to_glow=False):
utils.run_comparison_tests(
SimpleAndModule(),
(a, b),
fusible_ops={"aten::__and__"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimplePowModule(torch.nn.Module):
def __init__(self, power):
super(SimplePowModule, self).__init__()
self.power = power
def forward(self, tensor):
return torch.pow(tensor, self.power)
class TestPow(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("float", 2.2),
lambda: ("tensor_basic", torch.randn(4) + 2),
lambda: ("tensor_size[]", torch.tensor(2.2)),
lambda: ("tensor_broadcast", torch.randn(1) + 2),
]
)
def test_pow_basic(self, _, power):
"""Test of the PyTorch pow Node on Glow."""
utils.compare_tracing_methods(
SimplePowModule(power), torch.rand(4) + 5, fusible_ops={"aten::pow"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTransposeModel(torch.nn.Module):
def __init__(self, dim0=None, dim1=None, inplace=False):
super(SimpleTransposeModel, self).__init__()
        self.dims = (dim0, dim1) if dim0 is not None and dim1 is not None else None
self.inplace = inplace
def forward(self, tensor):
t = tensor + tensor
if self.dims:
return t.transpose_(*self.dims) if self.inplace else t.transpose(*self.dims)
else:
return t.t_() if self.inplace else t.t()
class TestTranspose(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("2d", SimpleTransposeModel(), torch.randn(7, 4)),
lambda: ("1d", SimpleTransposeModel(), torch.randn(7)),
lambda: ("inplace", SimpleTransposeModel(inplace=True), torch.randn(7, 4)),
]
)
def test_t(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::t"})
@utils.deterministic_expand(
[
lambda: ("simple", SimpleTransposeModel(1, 2), torch.randn(2, 3, 4)),
lambda: (
"inplace",
SimpleTransposeModel(1, 2, inplace=True),
torch.randn(2, 3, 4),
),
lambda: ("neg_dim", SimpleTransposeModel(-2, -1), torch.randn(2, 3, 4)),
]
)
def test_transpose(self, _, module, tensor, reference=None):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::transpose"})
@utils.deterministic_expand(
[lambda: ("oob_neg_dim", SimpleTransposeModel(-2, -4), torch.randn(2, 3, 4))]
)
def test_transpose_failure(self, _, module, tensor):
with self.assertRaises(IndexError):
utils.compare_tracing_methods(
module, tensor, fusible_ops={"aten::transpose"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP, SUBGRAPH_ATTR
class TestGetAttr(utils.TorchGlowTestCase):
def test_getattr(self):
"""Test fusion of the PyTorch prim::GetAttr Node into the Glow subgraph."""
with torch.no_grad():
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
x = torch.tensor([2.0, 3.0])
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
m = Model()
jit_m = torch.jit.trace(m, x)
jit_m_graph = jit_m.graph_for(x)
# Ensure all prim::GetAttrs were fused and none were left out
found_getattrs = False
for node in jit_m_graph.nodes():
kind = node.kind()
assert (
kind != "prim::GetAttr"
), "Expected all prim::GetAttrsGlow to be in Glow subgraph"
if kind == GLOW_FUSION_GROUP:
glow_subgraph = node.g(SUBGRAPH_ATTR)
for node in glow_subgraph.nodes():
if node.kind() == "prim::GetAttr":
found_getattrs = True
assert (
found_getattrs
), "Expected to find prim::GetAttrs in the Glow subgraph"
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleClampModel(torch.nn.Module):
def __init__(self, min, max):
super(SimpleClampModel, self).__init__()
self.min = min
self.max = max
def forward(self, input):
return torch.clamp(input, self.min, self.max)
class TestClamp(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", 0.0, 0.8, torch.float),
lambda: ("no_min", None, 0.8, torch.float),
lambda: ("no_max", 0.0, None, torch.float),
lambda: ("int_basic", 4, 8, torch.int32),
]
)
def test_clamp(self, _, min, max, dtype):
"""Test of the PyTorch clamp Node on Glow."""
a = torch.randn(2, 7)
if dtype == torch.int32:
            a = torch.randint(max * 2, (2, 7), dtype=dtype)
utils.compare_tracing_methods(
SimpleClampModel(min, max), a, fusible_ops={"aten::clamp"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleContiguousModel(torch.nn.Module):
def __init__(self, memory_format=torch.contiguous_format):
super(SimpleContiguousModel, self).__init__()
self.memory_format = memory_format
def forward(self, input):
formatted = input.contiguous(memory_format=self.memory_format)
return formatted + formatted
class TestContiguous(utils.TorchGlowTestCase):
def test_contiguous_basic(self):
"""Test of the PyTorch contiguous Node on Glow."""
x = torch.randn(2, 2, 2)
utils.compare_tracing_methods(
SimpleContiguousModel(), x, fusible_ops={"aten::contiguous"}
)
def test_with_alternate_memory_format(self):
x = torch.randn(3, 4, 5, 6)
utils.compare_tracing_methods(
SimpleContiguousModel(torch.channels_last),
x,
fusible_ops={"aten::contiguous"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleNormModule(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(SimpleNormModule, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, tensor):
return torch.norm(tensor, *self.args, **self.kwargs)
# TODO([email protected]): uncomment the following tests
# after https://github.com/pytorch/pytorch/pull/81761 lands
# class TestNorm(utils.TorchGlowTestCase):
# def test_norm_basic(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=0, p=2),
# torch.arange(8, dtype=torch.float).reshape(2, 4),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_float_p(self):
# """Test of the PyTorch norm Node that has p=2.0 on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=0, p=2.0),
# torch.arange(8, dtype=torch.float).reshape(2, 4),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_3d_inner_axis(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=1),
# torch.arange(8, dtype=torch.float).reshape(2, 2, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_4d_outer_axis(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=[3]),
# torch.arange(16, dtype=torch.float).reshape(2, 2, 2, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_keepdim(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=[1], keepdim=True),
# torch.arange(16, dtype=torch.float).reshape(2, 4, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMinModule(torch.nn.Module):
def __init__(self):
super(SimpleMinModule, self).__init__()
def forward(self, a, b):
return torch.min(a + a, b + b)
class UnaryMinModule(torch.nn.Module):
def __init__(self):
super(UnaryMinModule, self).__init__()
def forward(self, a):
return torch.min(a + a)
class TestMin(utils.TorchGlowTestCase):
def test_elementwise_min(self):
"""Test of the PyTorch min Node on Glow."""
utils.compare_tracing_methods(
SimpleMinModule(), torch.randn(7), torch.randn(7), fusible_ops={"aten::min"}
)
def test_elementwise_min_broadcast(self):
"""Test of the PyTorch min Node with broadcast on Glow."""
utils.compare_tracing_methods(
SimpleMinModule(),
torch.randn(2, 7),
torch.randn(7),
fusible_ops={"aten::min"},
)
def test_unary_min(self):
"""Test of the PyTorch unary min Node on Glow."""
utils.compare_tracing_methods(
UnaryMinModule(),
torch.randint(
20,
(
10,
10,
),
dtype=torch.int32,
),
fusible_ops={"aten::min"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from tests import utils
from tests.utils import check_skip, DEFAULT_BACKEND
class TestEmbeddingBag(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
def test_embedding_bag_basic(self):
"""Test of aten::embedding_bag node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, input, offsets, per_sample_weights):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
embedding_sum = torch.nn.EmbeddingBag.from_pretrained(
weight, mode="sum", include_last_offset=True
)
a = embedding_sum(input, offsets)
b = embedding_sum(input, offsets, per_sample_weights)
return a, b
input = torch.LongTensor([1, 0, 0, 1, 1])
offsets = torch.LongTensor([0, 1, 5]) # final item is endOffset
per_sample_weights = torch.FloatTensor([1, 2, 3, 4, 5])
utils.compare_tracing_methods(
TestModule(),
input,
offsets,
per_sample_weights,
fusible_ops={"aten::embedding_bag"},
)
class TestQuantizedEmbeddingBag(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
@utils.deterministic_expand(
[
            # Bind the loop variables as default arguments so each lambda captures its
            # own parameter values instead of the final values from the loops.
lambda num_lengths=num_lengths, is4bit=is4bit, is_weighted=is_weighted, use_fp16=use_fp16, per_sample_weights_fp16=per_sample_weights_fp16: (
"{len}{bits}{weighted}{fp16}{sample_weights}{backend}".format(
len=num_lengths,
bits="_4bit" if is4bit else "_byte",
weighted="_weighted" if is_weighted else "",
fp16="_fp16" if use_fp16 else "",
sample_weights="_sample_weights_fp16"
if per_sample_weights_fp16 and is_weighted
else "",
backend="_" + DEFAULT_BACKEND,
),
num_lengths,
is4bit,
is_weighted,
use_fp16,
per_sample_weights_fp16,
)
for num_lengths in [0, 8]
for is4bit in [False, True]
for is_weighted in [False, True]
for use_fp16 in [False, True]
for per_sample_weights_fp16 in [False, True]
]
)
def test_embedding_bag_rowwise_offsets(
self,
name,
num_lengths,
is4bit,
is_weighted,
use_fp16,
per_sample_weights_fp16,
):
"""Test of quantized::embedding_bag_byte_rowwise_offsets and
quantized::embedding_bag_4bit_rowwise_offsets node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def __init__(self, q_weights, is4bit=False, per_sample_weights=None):
super().__init__()
self.q_weights = q_weights
self.per_sample_weights = per_sample_weights
if is4bit:
self.op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
else:
self.op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
def forward(self, indices, offsets):
return self.op(
self.q_weights,
indices,
offsets,
mode=0,
per_sample_weights=self.per_sample_weights,
include_last_offset=True,
)
# generate random weights and indices
num_embeddings = 16
embedding_dim = 4
weights = torch.from_numpy(
(np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(
np.float32
)
)
q_weights = (
torch.ops.quantized.embedding_bag_4bit_prepack(weights)
if is4bit
else torch.ops.quantized.embedding_bag_byte_prepack(weights)
)
np_lengths = (
np.zeros(shape=[10]).astype(np.int32)
if num_lengths == 0
else np.random.randint(0, num_lengths, size=10).astype(np.int32)
)
num_lengths = np.sum(np_lengths)
lengths = torch.from_numpy(np_lengths)
indices = torch.from_numpy(
np.random.randint(
low=0, high=num_embeddings, size=num_lengths, dtype=np.int64
)
).long()
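        # Build offsets from per-bag lengths: prepend 0 and take the cumulative sum,
        # so the last entry is the end offset (matching include_last_offset=True above).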
offsets = torch.cat([torch.zeros([1]), torch.cumsum(lengths, 0)]).long()
per_sample_weights_type = (
np.float16 if per_sample_weights_fp16 and is4bit else np.float32
)
per_sample_weights = torch.from_numpy(
np.random.uniform(low=0.01, high=0.5, size=[len(indices)]).astype(
per_sample_weights_type
)
)
m = TestModule(q_weights, is4bit, per_sample_weights if is_weighted else None)
utils.compare_tracing_methods(
m,
indices,
offsets,
fusible_ops={
"quantized::embedding_bag_4bit_rowwise_offsets"
if is4bit
else "quantized::embedding_bag_byte_rowwise_offsets"
},
fp16=use_fp16,
            # FP16 version is known to yield different results, so our
# test here is mainly focusing on the flow rather than actual
# accuracy. There will be additional coverage on accuracy of
# the lowered modules
atol=0.02 if (is4bit or use_fp16) else 5e-4,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFlattenModule(torch.nn.Module):
def __init__(self, start_dim=0, end_dim=-1):
super(SimpleFlattenModule, self).__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, input):
return torch.flatten(input, start_dim=self.start_dim, end_dim=self.end_dim)
class TestFlatten(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleFlattenModule(), torch.randn(2, 3, 2, 5)),
lambda: ("start_at_0", SimpleFlattenModule(0, 2), torch.randn(2, 3, 2, 5)),
lambda: (
"start_in_middle",
SimpleFlattenModule(1, 2),
torch.randn(2, 3, 2, 5),
),
lambda: (
"negative_end_dim",
SimpleFlattenModule(0, -2),
torch.randn(2, 3, 2, 5),
),
lambda: ("same_dim", SimpleFlattenModule(2, 2), torch.randn(2, 3, 2, 5)),
lambda: (
"negative_start_dim",
SimpleFlattenModule(-3, -1),
torch.randn(2, 3, 2, 5),
),
]
)
def test_flatten(self, _, module, input):
utils.compare_tracing_methods(module, input, fusible_ops={"aten::flatten"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleGeluModule(torch.nn.Module):
def forward(self, tensor):
return F.gelu(tensor + tensor)
class TestGelu(utils.TorchGlowTestCase):
def test_gelu_basic(self):
"""Basic test of the PyTorch gelu Node on Glow."""
def test_f(a):
return F.gelu(a + a)
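        # Repeat the comparison with fresh random inputs to cover a range of values.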
for _ in range(100):
x = torch.randn(10)
utils.compare_tracing_methods(
SimpleGeluModule(),
x,
check_trace=False,
atol=1e-3,
fusible_ops={"aten::gelu"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBmmModule(torch.nn.Module):
def forward(self, a, b):
return (a + a).bmm(b)
class TestBmm(utils.TorchGlowTestCase):
def test_bmm(self):
"""Basic test of the PyTorch bmm Node on Glow."""
x = torch.randn(6, 4, 10)
y = torch.randn(6, 10, 2)
utils.compare_tracing_methods(
SimpleBmmModule(), x, y, fusible_ops={"aten::bmm"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleLinearModule(torch.nn.Module):
def __init__(self):
super(SimpleLinearModule, self).__init__()
def forward(self, input, weight, bias=None):
return F.linear((input + input), weight, bias)
class TestLinear(utils.TorchGlowTestCase):
def test_linear_basic(self):
"""Basic test of the PyTorch aten::linear op on Glow."""
def test_f(input, weight, bias=None):
return F.linear((input + input), weight, bias)
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, fusible_ops={"aten::linear"}
)
def test_linear_bias(self):
"""Test of the PyTorch aten::linear op on Glow."""
def test_f(input, weight, bias=None):
return F.linear((input + input), weight, bias)
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
bias = torch.randn(out_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, bias, fusible_ops={"aten::linear"}
)
def test_linear_broadcast(self):
"""Test of the PyTorch aten::linear op with broadcasting on Glow."""
def test_f(input, weight, bias=None):
return F.linear((input + input), weight, bias)
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, 9, 7, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, fusible_ops={"aten::linear"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class IndexPutModule(torch.nn.Module):
def __init__(self, indices, accumulate=False):
super(IndexPutModule, self).__init__()
self.indices = indices
self.accumulate = accumulate
def forward(self, tensor, val):
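        # Write val into tensor at the configured indices (optionally accumulating),
        # then return the elementwise-doubled result.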
tensor.index_put_(self.indices, val, accumulate=self.accumulate)
tensor = tensor + tensor
return tensor
class TestIndexPut(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
IndexPutModule([torch.tensor([1, 1]), torch.tensor([0, 1])]),
torch.zeros(2, 3),
torch.tensor([1.0, 2.0]),
),
lambda: (
"3d_0",
IndexPutModule(
[torch.tensor([1, 1]), torch.tensor([0, 1]), torch.tensor([0, 1])]
),
torch.zeros(2, 3, 4),
torch.tensor([1.0, 2.0]),
),
lambda: (
"3d_1",
IndexPutModule(
[
torch.tensor([1, 1, 0]),
torch.tensor([0, 1, 1]),
torch.tensor([0, 1, 0]),
]
),
torch.zeros(2, 3, 4),
torch.tensor([1.0, 2.0, 3.0]),
),
lambda: (
"broadcast_value_0",
IndexPutModule(
[
torch.tensor([2, 0, 1]),
torch.tensor([1, 2, 0]),
torch.tensor([2, 0, 1]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0]),
),
lambda: (
"broadcast_value_1",
IndexPutModule(
[
torch.tensor([1, 1, 2]),
torch.tensor([0, 1, 2]),
torch.tensor([0, 1, 3]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0]),
),
lambda: (
"broadcast_value_2",
IndexPutModule(
[
torch.tensor([1, 1, 0]),
torch.tensor([0, 1, 0]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0, 1.0, 1.0, 1.0]),
),
lambda: (
"accumulate_basic",
IndexPutModule([torch.tensor([1, 2]), torch.tensor([0, 1])]),
torch.zeros(4, 3),
torch.tensor([1.0, 2.0]),
),
lambda: (
"accumulate_broadcast",
IndexPutModule(
[
torch.tensor([1, 1, 2]),
torch.tensor([0, 1, 2]),
torch.tensor([0, 1, 3]),
],
True,
),
torch.ones(5, 4, 6),
torch.tensor([5.0]),
),
lambda: (
"dim_0",
IndexPutModule(
[
torch.tensor([1]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([5.0]),
),
lambda: (
"dim_1",
IndexPutModule(
[
torch.tensor([1]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([-3.0, -4.0]),
),
lambda: (
"dim_2",
IndexPutModule(
[
torch.tensor([1, 0]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([-3.0, -4.0]),
),
lambda: (
"dim_3",
IndexPutModule(
[
torch.tensor([1, 0, 2]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([[-3.0], [-4.0], [-5.0]]),
),
]
)
def test_index_put(self, _, module, tensor, value):
utils.compare_tracing_methods(
module, tensor, value, fusible_ops={"aten::index_put_"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleDropoutModule(torch.nn.Module):
def __init__(self, p=0.5, training=True, inplace=False):
super(SimpleDropoutModule, self).__init__()
self.p = p
self.training = training
self.inplace = inplace
def forward(self, input):
return F.dropout(
input + input, p=self.p, training=self.training, inplace=self.inplace
)
class TestDropout(utils.TorchGlowTestCase):
def test_dropout(self):
"""Basic test of the PyTorch aten::dropout Node on Glow."""
utils.compare_tracing_methods(
SimpleDropoutModule(training=False),
torch.randn(6, 4, 10),
fusible_ops={"aten::dropout"},
)
def test_dropout_inplace(self):
"""Basic test of the PyTorch aten::dropout_ Node on Glow."""
# Expect fuser to out-of-place the operator
utils.compare_tracing_methods(
SimpleDropoutModule(training=False, inplace=True),
torch.randn(6, 4, 10),
fusible_ops={"aten::dropout"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleToModel(torch.nn.Module):
def __init__(self, *conversions):
super(SimpleToModel, self).__init__()
self.conversions = conversions
def forward(self, tensor):
for conversion_type in self.conversions:
tensor = tensor.to(conversion_type)
return tensor
class ToWithDeviceModel(torch.nn.Module):
def __init__(self, *conversions):
super(ToWithDeviceModel, self).__init__()
self.conversions = conversions
def forward(self, tensor):
for conversion_type in self.conversions:
tensor = tensor.to(device="cpu", dtype=conversion_type)
return tensor
class SimplePrimToModel(torch.nn.Module):
def __init__(self, conversion, device=None):
super().__init__()
        self.device = device
self.conversion = conversion
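        # Select the forward implementation up front; branching inside forward
        # would not be fused during scripting.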
if self.device is None:
self.forward = self._forward_dtype
else:
self.forward = self._forward_device_dtype
def _forward_device_dtype(self, dummy):
return torch.ops.prim.NumToTensor(dummy.size(0)).to(
device=self.device, dtype=self.conversion
)
def _forward_dtype(self, dummy):
return torch.ops.prim.NumToTensor(dummy.size(0)).to(self.conversion)
class TestTo(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("to_int", SimpleToModel(torch.int), torch.randn(1, 2, 3, 4)),
lambda: ("to_float", SimpleToModel(torch.float), torch.randn(1, 2, 3, 4)),
lambda: (
"to_int_to_float",
SimpleToModel(torch.int, torch.float),
torch.randn(1, 2, 3, 4),
),
lambda: (
"to_int_with_device",
ToWithDeviceModel(torch.int),
torch.randn(1, 2, 3, 4),
),
lambda: ("to_cpu", SimpleToModel("cpu"), torch.randn(1, 2, 3, 4)),
lambda: (
"to_tensor",
SimpleToModel(torch.randn(3, 4).type(torch.int32)),
torch.randn(1, 2, 3, 4),
),
]
)
def test_to(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::to"})
@utils.deterministic_expand(
[
lambda: (
"to_prim_dtype",
SimplePrimToModel(torch.float),
torch.randn(5, 6, 7),
),
lambda: ("to_prim_device", SimplePrimToModel("cpu"), torch.randn(5, 6, 7)),
lambda: (
"to_prim_device_with_dtype",
SimplePrimToModel(torch.float, "cuda"),
torch.randn(5, 6, 7),
),
]
)
def test_to_prim(self, _, module, tensor):
utils.compare_tracing_methods(
module,
tensor,
fusible_ops={"prim::NumToTensor", "aten::to"},
scripted=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimplePermuteModule(torch.nn.Module):
def __init__(self, *dimensions):
super(SimplePermuteModule, self).__init__()
self.dimensions = dimensions
def forward(self, tensor):
return tensor.permute(*self.dimensions)
class TestPermute(utils.TorchGlowTestCase):
def test_permute(self):
"""Basic test of the PyTorch aten::permute node on Glow."""
utils.compare_tracing_methods(
SimplePermuteModule(0, 2, 1),
torch.randn(2, 3, 4),
fusible_ops={"aten::permute"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedCatModel(torch.nn.Module):
def __init__(self, dimension, scale, zero_point):
super(SimpleQuantizedCatModel, self).__init__()
self.dimension = dimension
self.scale = scale
self.zero_point = zero_point
def forward(self, a, b):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.cat(
(a, b),
dim=self.dimension,
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedCat(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"zero_offset",
SimpleQuantizedCatModel(
0,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0,
dtype=torch.quint8,
)(torch.randn([1, 2, 3, 4], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0,
dtype=torch.quint8,
)(torch.randn([5, 2, 3, 4], dtype=torch.float32)),
),
),
lambda: (
"basic",
SimpleQuantizedCatModel(
1,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0.3,
dtype=torch.quint8,
)(torch.randn([8, 8, 8, 8], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0.3,
dtype=torch.quint8,
)(torch.randn([8, 8, 8, 8], dtype=torch.float32)),
),
),
lambda: (
"with_empty_tensor",
SimpleQuantizedCatModel(
0,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.empty(0, dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.randn([8, 8], dtype=torch.float32)),
),
),
lambda: (
"with_differing_quantizations",
SimpleQuantizedCatModel(
2,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.6,
zero_point=0.2,
dtype=torch.quint8,
)(torch.randn([7, 7, 7], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.randn([7, 7, 7], dtype=torch.float32)),
),
),
]
)
def test_quantized_cat(self, _, module, tensors, fusion_blocklist=None):
utils.compare_tracing_methods(
module,
*tensors,
fusible_ops={"quantized::cat"},
            fusion_blocklist=fusion_blocklist,
skip_to_glow=False,
)
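# --- Illustrative sketch (hypothetical helper added for exposition; not part of
# the original test file). quantized::cat takes an output scale/zero_point of its
# own, which is what lets the "with_differing_quantizations" case above combine
# inputs quantized with different parameters into one consistently quantized result.
def _quantized_cat_sketch():
    a = torch.quantize_per_tensor(torch.randn(2, 3), scale=0.3, zero_point=0, dtype=torch.quint8)
    b = torch.quantize_per_tensor(torch.randn(2, 3), scale=0.2, zero_point=1, dtype=torch.quint8)
    out = torch.ops.quantized.cat((a, b), dim=0, scale=0.05, zero_point=0)
    return out.dequantize()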
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSplitModel(torch.nn.Module):
def __init__(self, split_size_or_sections, dimension):
super(SimpleSplitModel, self).__init__()
self.split_size_or_sections = split_size_or_sections
self.dimension = dimension
def forward(self, x):
return torch.split(x, self.split_size_or_sections, self.dimension)
class TestSplit(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (torch.randn(8), 4, 0),
lambda: (torch.randn(10), [1, 2, 3, 4], 0),
lambda: (torch.randn(10, 10, 10), 3, 2),
lambda: (torch.randn(100, 100), [25, 50, 25], 1),
lambda: (torch.randn(100, 100), [25, 50, 25], -2),
lambda: (torch.randn(100, 100), 25, -1),
]
)
def test_split(self, tensor, split_size_or_sections, dimension):
utils.compare_tracing_methods(
SimpleSplitModel(split_size_or_sections, dimension), tensor
)
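# --- Illustrative sketch (hypothetical helper added for exposition; not part of
# the original test file). torch.split accepts either an int (equal chunks, the
# last possibly smaller) or a list of explicit section sizes; both forms appear
# in the parameters above.
def _split_semantics_sketch():
    x = torch.randn(10)
    equal_chunks = torch.split(x, 4, dim=0)            # sizes (4, 4, 2)
    listed_chunks = torch.split(x, [1, 2, 3, 4], 0)    # sizes (1, 2, 3, 4)
    return equal_chunks, listed_chunks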
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddMmModule(torch.nn.Module):
def __init__(self, alpha=1, beta=1):
super(SimpleAddMmModule, self).__init__()
self.alpha = alpha
self.beta = beta
def forward(self, a, b, c):
        return (a + a).addmm(b, c, beta=self.beta, alpha=self.alpha)
class TestAddMM(utils.TorchGlowTestCase):
def test_addmm_basic(self):
"""Basic test of the PyTorch addmm Node on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(),
(torch.randn(6, 4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
fp16vfp16_atol=1e-3,
fp16vfp16_rtol=1e-3,
)
def test_addmm_broadcast(self):
"""Test of the PyTorch addmm with broadcasting add on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(),
(torch.randn(4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
)
def test_addmm_broadcast_with_alpha_and_beta(self):
"""Test of the PyTorch addmm with broadcasting add on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(2.0, 3.0),
(torch.randn(4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
)
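# --- Illustrative sketch (hypothetical helper added for exposition; not part of
# the original test file). addmm computes beta * input + alpha * (mat1 @ mat2),
# which is presumably why the tests above expect aten::add and aten::mm to be
# fusible once the op is decomposed.
def _addmm_semantics_sketch(alpha=2.0, beta=3.0):
    a = torch.randn(6, 4)
    b = torch.randn(6, 10)
    c = torch.randn(10, 4)
    fused = torch.addmm(a, b, c, beta=beta, alpha=alpha)
    manual = beta * a + alpha * (b @ c)
    assert torch.allclose(fused, manual, atol=1e-5)
    return fused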
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMmModule(torch.nn.Module):
def __init__(self):
super(SimpleMmModule, self).__init__()
def forward(self, a, b, t):
r = torch.mm(a, b)
return r.mm(t)
class TestMm(utils.TorchGlowTestCase):
def test_mm_basic(self):
"""Test of the PyTorch mm Node on Glow."""
x = torch.randn(2, 3)
y = torch.randn(4, 3).t()
t = torch.randn(4, 2)
utils.compare_tracing_methods(
SimpleMmModule(), x, y, t, fusible_ops={"aten::mm"}, skip_to_glow=True
)
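# --- Illustrative sketch (hypothetical helper added for exposition; not part of
# the original test file). The chained mm above multiplies a (2, 3) by a (3, 4)
# matrix and the (2, 4) result by a (4, 2) matrix, yielding a (2, 2) output.
def _mm_shape_sketch():
    x = torch.randn(2, 3)
    y = torch.randn(4, 3).t()  # (3, 4) after transpose
    t = torch.randn(4, 2)
    out = torch.mm(x, y).mm(t)
    assert out.shape == (2, 2)
    return out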
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedMaxPoolModel(torch.nn.Module):
    def __init__(self, scale, zero_point, dtype, kernel_size):
        super(SimpleQuantizedMaxPoolModel, self).__init__()
        self.scale = scale
        self.zero_point = zero_point
        self.dtype = dtype
        self.kernel_size = kernel_size
    def forward(self, tensor):
        quantize = torch.nn.quantized.Quantize(
            scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
        )
        dequantize = torch.nn.quantized.DeQuantize()
        maxpool = torch.nn.MaxPool2d(self.kernel_size)
        return dequantize(maxpool(quantize(tensor)))
class TestQuantizedMaxPool(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleQuantizedMaxPoolModel(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
),
lambda: (
"cut_q",
SimpleQuantizedMaxPoolModel(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
{"aten::quantize_per_tensor"},
),
]
)
def test_quantized_maxpool(self, _, module, tensor, fusion_blocklist=None):
fusible_ops = {
"aten::max_pool2d",
"aten::quantize_per_tensor",
"aten::dequantize",
}
fusible_ops -= fusion_blocklist or set()
utils.compare_tracing_methods(
module, tensor, fusible_ops=fusible_ops, fusion_blocklist=fusion_blocklist
)
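# --- Illustrative sketch (hypothetical helper added for exposition; not part of
# the original test file). The eager-mode pipeline the module above builds is
# quantize -> max_pool2d -> dequantize; in the "cut_q" case the blocklisted
# aten::quantize_per_tensor is dropped from the expected fusible set, so only
# the pooling and dequantize are checked for fusion.
def _quantized_maxpool_sketch():
    x = torch.randn(1, 4, 5, 5)
    q = torch.quantize_per_tensor(x, scale=1.0 / 128, zero_point=3, dtype=torch.quint8)
    pooled = torch.nn.functional.max_pool2d(q, kernel_size=3)
    return pooled.dequantize()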
|