# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import pickle
import torch
import torch_glow
from tests import utils
class TestCompilationSpec(utils.TorchGlowTestCase):
    def build_compilation_spec(self):
compilation_spec = torch_glow.CompilationSpec()
compilation_spec_settings = compilation_spec.get_settings()
compilation_spec_settings.set_glow_backend("CPU")
compilation_spec_settings.set_enable_fuser(True)
fuser_settings = compilation_spec.get_fuser_settings()
fuser_settings.set_min_fusion_group_size(3)
fuser_settings.set_max_fusion_merge_size(4)
fuser_settings.set_fusion_start_index(5)
fuser_settings.set_fusion_end_index(6)
fuser_settings.op_blacklist_append("aten::mean")
fuser_settings.op_blacklist_append("aten::dropout")
compilation_group = torch_glow.CompilationGroup()
input1_spec = torch_glow.input_spec_from_tensor(torch.randn(2, 3, 224, 224))
input2_spec = torch_glow.input_spec_from_tensor(
torch.randn(3, 2).to(torch.float16)
)
compilation_group.input_sets_append([input1_spec, input2_spec])
compilation_group.input_sets_append(
torch_glow.input_specs_from_tensors(
[torch.randn(1, 3, 224, 224), torch.randn(4, 1)]
)
)
compilation_group_settings = compilation_group.get_settings()
compilation_group_settings.set_convert_to_fp16(True)
compilation_group_settings.set_num_devices_to_use(50)
compilation_group_settings.set_replication_count(52)
compilation_group_settings.backend_specific_opts_insert("apple", "orange")
compilation_spec.compilation_groups_append(compilation_group)
default_compilation_group_settings = (
compilation_spec.get_default_compilation_group_settings()
)
default_compilation_group_settings.set_convert_to_fp16(False)
default_compilation_group_settings.set_num_devices_to_use(89)
default_compilation_group_settings.set_replication_count(90)
default_compilation_group_settings.backend_specific_opts_insert(
"hello", "goodbye"
)
return compilation_spec
def validate_compilation_spec(self, compilation_spec):
compilation_spec_settings = compilation_spec.get_settings()
self.assertEqual(compilation_spec_settings.get_glow_backend(), "CPU")
self.assertEqual(compilation_spec_settings.get_enable_fuser(), True)
fuser_settings = compilation_spec.get_fuser_settings()
self.assertEqual(fuser_settings.get_min_fusion_group_size(), 3)
self.assertEqual(fuser_settings.get_max_fusion_merge_size(), 4)
self.assertEqual(fuser_settings.get_fusion_start_index(), 5)
self.assertEqual(fuser_settings.get_fusion_end_index(), 6)
self.assertEqual(fuser_settings.get_op_blacklist()[0], "aten::mean")
self.assertEqual(fuser_settings.get_op_blacklist()[1], "aten::dropout")
compilation_groups = compilation_spec.get_compilation_groups()
self.assertEqual(len(compilation_groups), 1)
compilation_group = compilation_groups[0]
input_sets = compilation_group.get_input_sets()
self.assertEqual(len(input_sets), 2)
self.assertEqual(input_sets[0][0].get_dims(), [2, 3, 224, 224])
self.assertEqual(input_sets[0][1].get_dims(), [3, 2])
self.assertEqual(input_sets[1][0].get_dims(), [1, 3, 224, 224])
self.assertEqual(input_sets[1][1].get_dims(), [4, 1])
# 5 is at::Half
self.assertEqual(input_sets[0][1].get_elem_type(), 5)
compilation_group_settings = compilation_group.get_settings()
self.assertEqual(compilation_group_settings.get_convert_to_fp16(), True)
self.assertEqual(compilation_group_settings.get_num_devices_to_use(), 50)
self.assertEqual(compilation_group_settings.get_replication_count(), 52)
self.assertEqual(
compilation_group_settings.backend_specific_opts_at("apple"), "orange"
)
default_compilation_group_settings = (
compilation_spec.get_default_compilation_group_settings()
)
self.assertEqual(
default_compilation_group_settings.get_convert_to_fp16(), False
)
self.assertEqual(
default_compilation_group_settings.get_num_devices_to_use(), 89
)
self.assertEqual(default_compilation_group_settings.get_replication_count(), 90)
self.assertEqual(
default_compilation_group_settings.backend_specific_opts_at("hello"),
"goodbye",
)
def test_new_glow_compile_spec(self):
"""Test glow compile spec basics."""
        compilation_spec = self.build_compilation_spec()
# Sanity check
self.validate_compilation_spec(compilation_spec)
# Serialize and deserialize
pickled = pickle.dumps(compilation_spec)
unpickled = pickle.loads(pickled)
# Recheck the spec
self.validate_compilation_spec(unpickled)
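# Illustrative sketch (not part of the original tests): a CompilationSpec like
# the one built above is consumed by torch_glow.to_glow, keyed by method name.
# The module and example input below are hypothetical.
def _example_compile_with_spec():
    spec = torch_glow.CompilationSpec()
    spec.get_settings().set_glow_backend("Interpreter")
    group = torch_glow.CompilationGroup()
    spec.compilation_groups_append(group)
    x = torch.randn(2, 3)
    group.input_sets_append(torch_glow.input_specs_from_tensors([x]))
    traced = torch.jit.trace(torch.nn.Linear(3, 4), x)
    glow_mod = torch_glow.to_glow(traced, {"forward": spec})
    return glow_mod(x)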
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import tempfile
import torch
import torch_glow
from tests import utils
class TestLoadBackendSpecificOptions(utils.TorchGlowTestCase):
def test_backend_specific_options(self):
"""Test loading backend specific options from YAML file."""
def test_f(a, b):
return a.add(b)
x = torch.randn(4)
y = torch.randn(4)
# Create YAML file with backend options
with tempfile.NamedTemporaryFile() as options_fd:
options_fd.write(b"interpreter-memory: 4194304\n")
options_fd.flush()
# Run Glow
torch_glow.loadBackendSpecificOptions(options_fd.name)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
glow_trace = torch.jit.trace(test_f, (x, y), check_trace=False)
glow_trace(x, y)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
def get_compilation_spec(inputs):
"""helper function to get the compilation spec of the submodule"""
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(torch_glow.input_specs_from_tensors(inputs))
return spec
class QuantizedModule(torch.nn.Module):
def forward(self, a, b):
return torch.ops.quantized.add(a, b, scale=1.0 / 21, zero_point=10)
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.nn.quantized.Quantize(
scale=1.0 / 21, zero_point=0, dtype=torch.qint8
)
self.dequant = torch.nn.quantized.DeQuantize()
self.add = QuantizedModule()
def forward(self, a, b):
return self.dequant(self.add(self.quant(a), self.quant(b)))
class TestInputSpec(utils.TorchGlowTestCase):
def test_input_spec(self):
"""Test setting quantized and non-quantized input specs."""
with torch.no_grad():
a = torch.tensor([[0.1]])
b = torch.tensor([[0.1]])
mod = TestModule()
traced_model = torch.jit.trace(mod, (a, b))
ref_result = traced_model(a, b)
# test non-quantized input
glow_mod = torch_glow.to_glow(traced_model, get_compilation_spec((a, b)))
glow_result = glow_mod(a, b)
self.assertTrue(torch.allclose(ref_result, glow_result))
# test quantized input
add_inputs = torch_glow.get_submod_inputs(mod, "add", (a, b))
glow_mod = torch_glow.to_glow_selective(
traced_model, {"add": get_compilation_spec(add_inputs)}
)
glow_result = glow_mod(a, b)
self.assertTrue(torch.allclose(ref_result, glow_result))
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Qux(torch.nn.Module):
def __init__(self, x):
super(Qux, self).__init__()
self.x = x
def forward(self, a, b):
return a - b - self.x
class Baz(torch.nn.Module):
def __init__(self, x):
super(Baz, self).__init__()
self.x = x
def forward(self, a, b):
return a + b * self.x
class Bar(torch.nn.Module):
def __init__(self, x):
super(Bar, self).__init__()
self.x = x
def forward(self, a, b):
return a * b + self.x
class Foo(torch.nn.Module):
def __init__(self, bar, baz):
super(Foo, self).__init__()
self.bar = bar
self.baz = baz
def forward(self, a, b):
return self.baz(self.bar(a.reshape(1, -1), b.reshape(1, -1)), b)
class Model(torch.nn.Module):
def __init__(self, foo, qux):
super(Model, self).__init__()
self.foo = foo
self.qux = qux
def forward(self, a, b):
return self.qux(self.foo(a, b), a)
r"""
model
/ \
foo qux (Glow)
/ \
bar (Glow) baz
"""
bar = Bar(4.0)
baz = Baz(2.0)
qux = Qux(3.0)
foo = Foo(bar, baz)
model = Model(foo, qux)
def get_compilation_spec(inputs):
"""helper function to get the compilation spec of the submodule"""
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(torch_glow.input_specs_from_tensors(inputs))
return spec
class TestSelectiveToGlow(utils.TorchGlowTestCase):
def test_to_glow_selective(self):
inputs = (torch.zeros(4) + 8, torch.zeros(4) + 7)
torch_res = model(*inputs)
bar_inputs = torch_glow.get_submod_inputs(model, "foo.bar", inputs)
qux_inputs = torch_glow.get_submod_inputs(model, "qux", inputs)
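        # For a module that has not yet been traced, to_glow_selective takes
        # (spec, example_inputs) pairs so each submodule can be traced; a
        # pre-traced module (see the next test) needs only the spec.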
glow_mod = torch_glow.to_glow_selective(
model,
{
"foo.bar": (get_compilation_spec(bar_inputs), bar_inputs),
"qux": (get_compilation_spec(qux_inputs), qux_inputs),
},
inplace=False,
)
glow_mod = torch.jit.trace(glow_mod, inputs)
glow_res = glow_mod(*inputs)
assert torch.allclose(torch_res, glow_res)
def test_to_glow_selective_already_scripted(self):
inputs = (torch.zeros(4) + 8, torch.zeros(4) + 7)
torch_res = model(*inputs)
bar_inputs = torch_glow.get_submod_inputs(model, "foo.bar", inputs)
qux_inputs = torch_glow.get_submod_inputs(model, "qux", inputs)
with torch.no_grad():
traced_model = torch.jit.trace(model, inputs)
glow_mod = torch_glow.to_glow_selective(
traced_model,
{
"foo.bar": get_compilation_spec(bar_inputs),
"qux": get_compilation_spec(qux_inputs),
},
inplace=False,
)
glow_res = glow_mod(*inputs)
assert torch.allclose(torch_res, glow_res)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestOnlyTensorOutputs(utils.TorchGlowTestCase):
def test_only_tensor_outputs(self):
"""Test that Glow fuser only produces tensor outputs."""
def f(a, b):
x = (a + b).size(0)
c = a.reshape(x, -1)
return a + c
torch_glow.disableFusionPass()
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
        # By creating a graph with an aten::size (supported) feeding into an
        # unsupported op (prim::ListConstruct), we verify that even a supported
        # op is not fused if it would produce a non-tensor output from the
        # fusion group.
torch_glow.glowCustomFuseDebug_(
jit_f_graph, ["prim::Constant", "aten::add", "aten::size", "aten::reshape"]
)
fusion_nodes = 0
aten_sizes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
if node.kind() == "aten::size":
aten_sizes += 1
assert (
fusion_nodes == 2
), "Expected two fusion nodes to be split up with aten::size between them"
assert aten_sizes == 1, "Expected aten::size not to be fused"
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
import torch_glow
from tests import utils
class TestJITVsGlowPath(utils.TorchGlowTestCase):
def test_jit_vs_glow_path(self):
"""Basic test of the JIT vs. Glow logging feature."""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, input, weight):
return F.linear((input + input), weight)
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
TestModule(),
input,
weight,
fusible_ops={"aten::add", "aten::linear"},
)
def test_jit_vs_glow_int_path(self):
"""Test JIT vs. Glow logging with int type"""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return c
a = torch.randn(5, 6).to(dtype=torch.int32)
b = torch.randn(5, 6).to(dtype=torch.int32)
utils.compare_tracing_methods(TestModule(), a, b, fusible_ops={"aten::add"})
def test_jit_vs_glow_inplace(self):
"""Test JIT vs. Glow logging with in-place op"""
torch_glow.enable_jit_vs_glow_compare()
class TestModule(torch.nn.Module):
def forward(self, a, b):
a += b
return a
a = torch.randn(5, 6)
b = torch.randn(5, 6)
utils.compare_tracing_methods(TestModule(), a, b, fusible_ops={"aten::add_"})
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP, SUBGRAPH_ATTR
class TestBlockList(utils.TorchGlowTestCase):
def test_op_blocklist(self):
"""Test Glow fuser op kind blacklisting mechanism."""
def f(a, b):
return (a + b) * (a - b)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionBlocklist(["aten::add"])
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
fused_add = False
fused_sub = False
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
glow_subgraph = node.g(SUBGRAPH_ATTR)
for node in glow_subgraph.nodes():
if node.kind() == "aten::add":
fused_add = True
if node.kind() == "aten::sub":
fused_sub = True
        assert not fused_add, "Expected aten::add to be blocklisted"
        assert fused_sub, "Expected aten::sub not to be blocklisted"
torch_glow.clearFusionBlocklist()
def test_op_index_blocklist(self):
"""Test Glow fuser index blacklisting mechanism."""
def f(a, b):
x1 = a * b
x2 = x1 * b
x3 = x2 * a
x4 = x3 / b
x5 = x4 / a
x6 = x5 / b
x7 = x6 * a
x8 = x7 * b
return x8
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionStartIndex(3)
torch_glow.setFusionEndIndex(6)
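        # With start index 3 and end index 6, only nodes 3..5 (the three
        # aten::div ops here) are eligible for fusion.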
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
torch_glow.clearFusionIndices()
fused_muls = 0
fused_divs = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
glow_subgraph = node.g(SUBGRAPH_ATTR)
for node in glow_subgraph.nodes():
if node.kind() == "aten::mul":
fused_muls += 1
if node.kind() == "aten::div":
fused_divs += 1
assert fused_muls == 0, "Expected no aten::muls to be fused"
assert fused_divs == 3, "Expected all 3 aten::divs to be fused"
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import graph_contains_str
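# JIT IR reproducing the branched linear pattern: an aten::dim check selects
# between mm+add and matmul+add_ implementations of a linear layer.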
graph_str = """
graph(%input : Tensor, %weight : Tensor, %bias : Tensor):
%c : int = prim::Constant[value=4]()
%d : int = prim::Constant[value=1]()
%1 : int = aten::dim(%input)
%2 : bool = aten::eq(%1, %c)
%3 : Tensor = prim::If(%2)
block0():
%4 : Tensor = aten::t(%weight)
%5 : int = prim::Constant[value=1]()
%6 : Tensor = aten::mm(%input, %4)
%7 : Tensor = aten::add(%bias, %6, %5)
-> (%7)
block1():
%8 : Tensor = aten::t(%weight)
%9 : Tensor = aten::matmul(%input, %8)
%10 : Tensor = aten::add_(%9, %bias, %d)
-> (%10)
return (%3)
"""
class TestFuseLinear(utils.TorchGlowTestCase):
def test_fuse_linear(self):
"""Test Glow's fuseBranchedLinearPattern JIT pass"""
graph = torch._C.parse_ir(graph_str)
assert not graph_contains_str(graph, "glow::fused_linear")
torch_glow.fuseBranchedLinearPattern_(graph)
assert graph_contains_str(graph, "glow::fused_linear")
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
def run_model(m, input, randomize):
torch_glow.disableFusionPass()
traced_m = torch.jit.trace(m, input)
if randomize:
torch_glow.enable_randomize_constants()
else:
torch_glow.disable_randomize_constants()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
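    # set_same_as derives the InputSpec's shape and dtype from the example tensor.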
compilation_group.input_sets_append([input_spec])
glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
return glow_m(input)
class TestRandomizeWeights(utils.TorchGlowTestCase):
def test_randomize_weights(self):
m = Model()
input = torch.randn(5)
normal1 = run_model(m, input, False)
normal2 = run_model(m, input, False)
rand = run_model(m, input, True)
assert torch.allclose(normal1, normal2)
assert not torch.allclose(normal1, rand)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import torch
import torch_glow
from tests import utils
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 6, 3)
self.relu = torch.nn.ReLU()
self.conv2 = torch.nn.Conv2d(6, 16, 3)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
y = self.conv2(x)
return y
class Bar(torch.nn.Module):
def __init__(self, foo):
super(Bar, self).__init__()
self.foo = foo
def forward(self, x):
y = self.foo(x)
return y
class Baz(torch.nn.Module):
def __init__(self, foo):
super(Baz, self).__init__()
self.foo = foo
def forward(self, x):
y = self.foo(x)
return (x, y)
def create_model(x, ModType):
foo = Foo()
foo = torch.ao.quantization.QuantWrapper(foo)
foo.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(foo, inplace=True)
foo(x)
torch.ao.quantization.convert(foo, inplace=True)
model = ModType(foo)
return model
class TestToGlowWriteToOnnx(utils.TorchGlowTestCase):
def lower_and_write_to_onnx_helper(self, ModType, onnx_prefix):
x = torch.randn(1, 3, 8, 8)
model = create_model(x, ModType)
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(x)
compilation_group.input_sets_append([input_spec])
scripted_mod = torch.jit.trace(model, x)
torch_glow.enable_write_to_onnx()
torch_glow.set_onnx_file_name_prefix(onnx_prefix)
torch_glow.enable_write_without_randomize()
lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})
# Run Glow model
g = lowered_model(x)
# Run reference model
t = model(x)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
assert os.path.exists(onnx_prefix + ".onnxtxt")
onnx_files = glob.glob(onnx_prefix + "*.onnx*")
for f in onnx_files:
os.remove(f)
def test_lower_and_write_to_onnx_tensor_output(self):
onnx_prefix = "write_to_onnx_test1"
self.lower_and_write_to_onnx_helper(Bar, onnx_prefix)
def test_lower_and_write_to_onnx_tuple_output(self):
onnx_prefix = "write_to_onnx_test2"
self.lower_and_write_to_onnx_helper(Baz, onnx_prefix)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch_glow
from tests import utils
class TestFuseParallelBranches(utils.TorchGlowTestCase):
def test_fuse_parallel_branches_with_fusible_root(self):
r"""Test GlowFuser fusing parallel branches with a common fusible root
a = add(x, y)
/ \
b1 = add(a, x) b2 = add(a, y)
\ /
res = TupleConstruct(b1, b2)
This should be fused as
glow::FusionGroup_0
|
TupleConstruct
"""
def test_fuser(x, y):
a = x + y
branch1 = a + x
branch2 = a + y
res = (branch1, branch2)
return res
inputs = (torch.randn(2, 4), torch.randn(2, 4))
traced = torch.jit.trace(test_fuser, inputs)
torch_glow.glowCustomFuseDebug_(traced.graph)
count = 0
for node in traced.graph.nodes():
if node.kind() == "glow::FusionGroup":
count += 1
        assert count == 1, f"Expected 1 glow::FusionGroup, found {count}."
# TODO: support fusing parallel branches without a common fusible root correctly
@unittest.skip("Not supported yet")
def test_fuse_parallel_branches_without_fusible_root(self):
r"""Test GlowFuser fusing parallel branches without a common fusible root
x = add(x, x) y = add(y, y)
| |
b1 = add(x, x) b2 = add(y, y)
\ /
res = TupleConstruct(b1, b2)
This should be fused as
glow::FusionGroup_0
|
TupleConstruct
"""
def test_fuser(x, y):
x = x + x
y = y + y
branch1 = x + x
branch2 = y + y
res = (branch1, branch2)
return res
inputs = (torch.randn(2, 4), torch.randn(2, 4))
traced = torch.jit.trace(test_fuser, inputs)
torch_glow.glowCustomFuseDebug_(traced.graph)
count = 0
for node in traced.graph.nodes():
if node.kind() == "glow::FusionGroup":
count += 1
        assert count == 1, f"Expected 1 glow::FusionGroup, found {count}."
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import torch
import torch_glow
from tests import utils
def create_model(x, relu, bias=True):
"""x is an example input, relu is whether or not to include a fused relu."""
with torch.no_grad():
x_size = len(x.size())
conv_op = None
if x_size == 4:
if bias:
conv_op = torch.nn.Conv2d(3, 10, 3)
else:
conv_op = torch.nn.Conv2d(3, 10, 3, bias=False)
elif x_size == 5:
conv_op = torch.nn.Conv3d(3, 10, 3)
        else:
            raise ValueError(f"Only 2d and 3d conv supported, got {x_size}d inputs")
conv_op.weight.random_(-1, 1)
if bias:
conv_op.bias.data.random_(-1, 1)
model = None
if relu:
model = torch.nn.Sequential(
OrderedDict([("conv", conv_op), ("relu", torch.nn.ReLU())])
)
model = torch.ao.quantization.fuse_modules(model, [["conv", "relu"]])
else:
model = torch.nn.Sequential(OrderedDict([("conv", conv_op)]))
model = torch.ao.quantization.QuantWrapper(model)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
model(x)
torch.ao.quantization.convert(model, inplace=True)
return model
def run_to_glow(m, x):
"""Trace the model m with input x and call to_glow"""
traced_m = torch.jit.trace(m, (x))
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(x)
compilation_group.input_sets_append([input_spec])
lowered_module = torch_glow.to_glow(traced_m, spec)
return lowered_module
class TestConvToGlow(utils.TorchGlowTestCase):
def test_conv2d_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv2d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
def test_conv3d_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv3d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
def test_conv2d_to_glow_empty_bias(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, False, False)
run_to_glow(m, x)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import graph_contains_str
def foo(x):
y = x.dim()
if y == 1:
return x
else:
if x == 2:
return x * 2
else:
raise RuntimeError("hi")
class TestRemoveException(utils.TorchGlowTestCase):
def test_remove_exceptions(self):
"""Test Glow's removeExceptions JIT pass"""
foo_jit = torch.jit.script(foo)
graph = foo_jit.graph
assert graph_contains_str(graph, "prim::RaiseException")
torch_glow.removeExceptions_(graph)
assert not graph_contains_str(graph, "prim::RaiseException")
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestQuantizedCut(utils.TorchGlowTestCase):
def test_quantized_cut(self):
"""Test cut quantized chunk in the middle."""
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
def fun(a, b, c, d):
q = torch.nn.quantized.Quantize(
scale=1.0 / 21, zero_point=0, dtype=torch.quint8
)
dq = torch.nn.quantized.DeQuantize()
a = q(a)
b = q(b)
c = q(c)
d = q(d)
adds = torch.ops.quantized.add(a, b, scale=1.0 / 17, zero_point=5)
adds2 = torch.ops.quantized.add(c, d, scale=1.0 / 14, zero_point=4)
res = torch.ops.quantized.add_relu(
adds, adds2, scale=1.0 / 18, zero_point=6
)
res = torch.ops.quantized.add(res, res, scale=1.0 / 13, zero_point=7)
res = dq(res)
return res
with torch.no_grad():
a = torch.randn([5, 5])
b = torch.randn([5, 5])
c = torch.randn([5, 5])
d = torch.randn([5, 5])
res_torch = fun(a, b, c, d)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
# Cut using blocklist functionality
blocklist = ["quantized::add_relu"]
torch_glow.setFusionBlocklist(blocklist)
torch_glow.setGlowBackend("Interpreter")
traced_model = torch.jit.trace(fun, (a, b, c, d))
for node in traced_model.graph_for(a, b, c, d).nodes():
kind = node.kind()
# Make sure the blocklist is working
assert (
kind == GLOW_FUSION_GROUP
or kind in blocklist
or kind == "prim::Constant"
)
res_glow = traced_model(a, b, c, d)
print(res_torch)
print(res_glow)
assert torch.allclose(res_torch, res_glow)
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class SimpleModule(torch.nn.Module):
def __init__(self):
super(SimpleModule, self).__init__()
def forward(self, x):
y = x + x
y = y + 2
return y
class TestToGlowNumDevicesToUse(utils.TorchGlowTestCase):
def devices_to_use_test_helper(self, input, num_replications):
model = SimpleModule()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
# Init with total number of devices.
torch_glow.setGlowBackendNumDevices(6)
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
compilation_group.input_sets_append([input_spec])
compilation_group_settings = compilation_group.get_settings()
compilation_group_settings.set_num_devices_to_use(3)
compilation_group_settings.set_replication_count(num_replications)
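        # Presumably: compile for 3 of the 6 registered devices, replicating the
        # network num_replications times on each.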
traced_mod = torch.jit.trace(model, input)
lowered_model = torch_glow.to_glow(traced_mod, {"forward": spec})
g = lowered_model(input)
t = model(input)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
def devices_to_use_test(self):
self.devices_to_use_test_helper(input=torch.randn(4), num_replications=2)
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
def forward(self, x, y):
return x + y
class TestToGlowMultipleInputSets(utils.TorchGlowTestCase):
def test_to_glow_multiple_groups_and_input_sets(self):
x1 = torch.randn(1, 4)
y1 = torch.randn(2, 4)
x2 = torch.randn(1, 2)
y2 = torch.randn(5, 2)
x3 = torch.randn(7)
y3 = torch.randn(3, 7)
mod = Foo()
scripted_mod = torch.jit.script(mod)
x1_y1_set = torch_glow.input_specs_from_tensors([x1, y1])
x2_y2_set = torch_glow.input_specs_from_tensors([x2, y2])
x3_y3_set = torch_glow.input_specs_from_tensors([x3, y3])
        # Create two CompilationGroups: the first contains two input sets and
        # the second contains the third. Each input set describes one
        # input-shape signature the lowered module will accept.
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group_1 = torch_glow.CompilationGroup()
compilation_group_2 = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group_1)
spec.compilation_groups_append(compilation_group_2)
compilation_group_1.input_sets_append(x1_y1_set)
compilation_group_1.input_sets_append(x2_y2_set)
compilation_group_2.input_sets_append(x3_y3_set)
lowered_module = torch_glow.to_glow(scripted_mod, spec)
torch_res1 = mod(x1, y1)
torch_res2 = mod(x2, y2)
torch_res3 = mod(x3, y3)
glow_res1 = lowered_module(x1, y1)
glow_res2 = lowered_module(x2, y2)
glow_res3 = lowered_module(x3, y3)
assert torch.allclose(torch_res1, glow_res1)
assert torch.allclose(torch_res2, glow_res2)
assert torch.allclose(torch_res3, glow_res3)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
class LinearModel(torch.nn.Module):
def __init__(self):
super(LinearModel, self).__init__()
self.linear1 = torch.nn.Linear(5, 3)
self.linear2 = torch.nn.Linear(3, 1)
def forward(self, x):
return self.linear2(self.linear1(x))
class ConvModel(torch.nn.Module):
def __init__(self):
super(ConvModel, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 3, 1)
self.conv2 = torch.nn.Conv2d(3, 3, 1)
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(self.conv2(self.conv1(x)))
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv = ConvModel()
self.linear = LinearModel()
def forward(self, x):
return self.linear(self.conv(x))
def test_fuse_necessary_getattrs_only():
m = Model()
x = torch.randn(1, 3, 5, 5)
torch_glow.disableFusionPass()
jit_m = torch.jit.trace(m, x)
jit_m_graph = jit_m.graph_for(x)
    # Don't fuse aten::_convolution nodes
torch_glow.glowCustomFuseDebug_(
jit_m_graph,
["prim::Constant", "prim::GetAttr", "aten::t", "aten::matmul", "aten::add_"],
)
return m(x)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
from tests import utils
class TestSetGlowBackend(utils.TorchGlowTestCase):
def test_set_glow_backend(self):
"""Test setting the Glow backend type"""
backend_name_before = torch_glow.getGlowBackendName()
backend_num_devices_before = torch_glow.getGlowBackendNumDevices()
torch_glow.setGlowBackend("CPU")
torch_glow.setGlowBackendNumDevices(4)
assert torch_glow.getGlowBackendName() == "CPU"
assert torch_glow.getGlowBackendNumDevices() == 4
# reset everything
torch_glow.setGlowBackend(backend_name_before)
torch_glow.setGlowBackendNumDevices(backend_num_devices_before)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestMinGraphSize(utils.TorchGlowTestCase):
def test_min_graph_size(self):
"""Test Glow fuser minimum fusion group size mechanism."""
def f(a, b, c):
return (a * a * a * a) / (b * b * b) / (c * c * c * c * c)
torch_glow.disableFusionPass()
# Disable aten::div so that each group of aten::mul nodes will be forced
# into separate subgraphs
torch_glow.setFusionBlocklist(["aten::div"])
# Set minimum fusion group size to 3 nodes so that the smallest group which
# contains only 2 nodes will not be created
torch_glow.setMinFusionGroupSize(3)
a = torch.randn(5, 5)
b = torch.randn(5, 5)
c = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b, c))
jit_f_graph = jit_f.graph_for(a, b, c)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes == 2, "Expected smallest fusion group to not be created"
torch_glow.clearFusionBlocklist()
torch_glow.setMinFusionGroupSize(0)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.jit
import torch_glow
from tests import utils
# Use a model containing quantized::conv2d to verify that the preprocessed
# module is saved correctly in a lowered module (ops with packed weights like
# this one are rewritten during lowering, and therefore should only be present
# in the original graph).
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
with torch.no_grad():
conv = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
conv.weight.random_(-1, 1)
conv.bias.data.random_(-1, 1)
self.model = torch.ao.quantization.QuantWrapper(conv)
self.model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(self.model, inplace=True)
torch.ao.quantization.convert(self.model, inplace=True)
def forward(self, x):
return self.model(x)
class TestToGlowSavePreprocessedModule(utils.TorchGlowTestCase):
def test_save_preprocessed_module(self):
with torch.no_grad():
x = torch.randn([1, 4, 4, 4], dtype=torch.float32)
model = Bar()
model.eval()
model = torch.jit.trace(model, x)
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
compilation_group.input_sets_append(
torch_glow.input_specs_from_tensors([x])
)
torch_glow.disableFusionPass()
torch_glow.enable_convert_to_fp16()
glow_mod = torch_glow.to_glow(model, spec)
reloaded = utils.save_and_reload_model(glow_mod)
wrappername = "__loweredModule__"
attrname = "__processed_module"
wp = getattr(reloaded._c, wrappername)
pp = getattr(wp, attrname)
pt_model = torch.jit._recursive.wrap_cpp_module(pp)
graph = pt_model.graph_for(x)
found = False
for node in graph.nodes():
if node.kind() == "quantized::conv2d":
found = True
assert found
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP
class TestMaxFusionMergeSize(utils.TorchGlowTestCase):
def test_max_fusion_merge_size(self):
"""Test Glow fuser maximum fusion merge size mechanism."""
def f(a):
return a * a * a * a * a * a
torch_glow.disableFusionPass()
# Set maximum fusion merge size to 3 nodes so that the
# graph will not fit into 1 node
torch_glow.setMaxFusionMergeSize(3)
a = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes > 1, "Expected more than one fusion group to be created"
torch_glow.setMaxFusionMergeSize(0)
def test_max_fusion_merge_size_zero(self):
"""Test Glow fuser maximum fusion merge size mechanism set to zero."""
def f(a):
return a * a * a * a * a * a
torch_glow.disableFusionPass()
# Set maximum fusion merge size to 0 so that there is
# no limit to fusion
torch_glow.setMaxFusionMergeSize(0)
a = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
# print("before: ", jit_f_graph)
torch_glow.glowCustomFuseDebug_(jit_f_graph)
# print("after: ", jit_f_graph)
fusion_nodes = 0
for node in jit_f_graph.nodes():
if node.kind() == GLOW_FUSION_GROUP:
fusion_nodes += 1
assert fusion_nodes == 1, "Expected just one fusion group to be created"
torch_glow.setMaxFusionMergeSize(0)
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import torch
import torch_glow
from tests import utils
from tests.utils import assertModulesEqual
class TwoTupleModule(torch.nn.Module):
def __init__(self):
super(TwoTupleModule, self).__init__()
def forward(self, x):
y = 2 * x
return (x, y)
class OneTupleModule(torch.nn.Module):
def __init__(self):
super(OneTupleModule, self).__init__()
def forward(self, x):
y = 2 * x
return (y,)
class TestToGlowTupleOutput(utils.TorchGlowTestCase):
def tuple_test_helper(self, ModType):
input = torch.randn(4)
model = ModType()
spec = torch_glow.CompilationSpec()
spec.get_settings().set_glow_backend("Interpreter")
compilation_group = torch_glow.CompilationGroup()
spec.compilation_groups_append(compilation_group)
input_spec = torch_glow.InputSpec()
input_spec.set_same_as(input)
compilation_group.input_sets_append([input_spec])
scripted_mod = torch.jit.script(model)
lowered_model = torch_glow.to_glow(scripted_mod, {"forward": spec})
# Run Glow model
g = lowered_model(input)
# Run reference model
t = model(input)
self.assertEqual(type(g), type(t))
self.assertEqual(len(g), len(t))
for (gi, ti) in zip(g, t):
self.assertTrue(torch.allclose(gi, ti))
# test module ser/de with tuple output
buffer = io.BytesIO()
torch.jit.save(lowered_model, buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
assertModulesEqual(self, lowered_model, loaded_model)
def test_to_glow_one_tuple_output(self):
self.tuple_test_helper(OneTupleModule)
def test_to_glow_two_tuple_output(self):
self.tuple_test_helper(TwoTupleModule)
# ==============================================================================
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class TestGlowShapeInference(utils.TorchGlowTestCase):
def test_shape_inference_basics(self):
"""Test Glow shape inference basic usage."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference(
jit_f_graph,
args,
)
assert actual
def test_shape_inference_input_mismatch(self):
"""Test Glow shape inference basic error handling."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
        # args is empty, but the function expects one input.
        # Shape inference should raise an exception in this case.
args = ()
self.assertRaises(
Exception,
lambda: torch_glow.glow_shape_inference(
jit_f_graph,
args,
),
)
def test_shape_inference_supported_symbols(self):
"""Test Glow shape inference unsupported symbols."""
def f(a):
return a * a
a = torch.randn(1)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args
)
expected = []
self.assertEqual(set(expected), set(actual))
def test_shape_inference_unsupported_symbols(self):
"""Test Glow shape inference unsupported symbols."""
def f(a):
            # linalg.multi_dot is currently not supported by the shape inference engine
return torch.matrix_power(torch.linalg.multi_dot([a * 3, a + 4]), 3)
a = torch.randn(3, 3)
jit_f = torch.jit.trace(f, (a))
jit_f_graph = jit_f.graph_for(a)
args = (a,)
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args
)
expected = ["aten::linalg_multi_dot", "aten::linalg_matrix_power"]
self.assertEqual(set(expected), set(actual))
blocklist = ["aten::linalg_multi_dot"]
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, blocklist
)
expected = ["aten::linalg_matrix_power"]
self.assertEqual(set(expected), set(actual))
def test_shape_inference_unsupported_symbols_skip_fusion_group(self):
"""Test Glow shape inference unsupported symbols including skipping of
symbols after a secondary fusion group."""
def f(a, b):
x1 = a * b
x2 = x1 * b
x3 = x2 * a
x4 = x3 / b
x5 = x4 / a
x6 = x5 / b
x7 = x6 * a
x8 = x7 * b
return x8 * torch.linalg.multi_dot([x8, x8])
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.setFusionStartIndex(3)
torch_glow.setFusionEndIndex(6)
a = torch.randn(5, 5)
b = torch.randn(5, 5)
jit_f = torch.jit.trace(f, (a, b))
jit_f_graph = jit_f.graph_for(a, b)
torch_glow.clearFusionIndices()
args = (a, b)
        # Don't skip nodes after the last fusion node.
        # In this case, one of the nodes (linalg.multi_dot) following the last
        # fusion node is not supported, and should be reported.
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, skip_last_fusion_node=False
)
expected = [
"aten::linalg_multi_dot",
]
self.assertEqual(set(expected), set(actual))
        # DO skip nodes after the last fusion node.
        # In this case, the unsupported node (linalg.multi_dot) following the
        # last fusion node is suppressed due to the skip_last_fusion_node flag.
actual = torch_glow.glow_shape_inference_find_unsupported_symbols(
jit_f_graph, args, skip_last_fusion_node=True
)
expected = []
self.assertEqual(set(expected), set(actual))
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
class TestPrintJitNodeIndices(utils.TorchGlowTestCase):
"""Test printing PyTorch jit node indices."""
def test_print_jit_indices(self):
def test_f(a, b):
c = a.add(b)
return c.add(c)
x = torch.randn(4)
y = torch.randn(4)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
torch_glow.enable_printing_jit_node_indices()
graph = torch.jit.trace(test_f, (x, y), check_trace=False)
graph(x, y)
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSizeModel(torch.nn.Module):
def __init__(self, dimension):
super(SimpleSizeModel, self).__init__()
self.dimension = dimension
def forward(self, tensor):
return tensor.size(self.dimension)
class TestSize(utils.TorchGlowTestCase):
    # Need to be able to export lists from Glow fused nodes.
    # Commented out to avoid triggering internal CI.
# @unittest.skip(reason="not ready")
# def test_size_basic(self):
# """Test of the PyTorch aten::size Node on Glow."""
# def test_f(a):
# b = a + a.size(0)
# return b
# x = torch.zeros([4], dtype=torch.int32)
# utils.compare_tracing_methods(test_f, x, fusible_ops={"aten::size"})
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleSizeModel(-1),
torch.randn(2, 3, 4, dtype=torch.float32),
)
]
)
def test_size(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::size"})
@utils.deterministic_expand(
[
lambda: (
"oob",
SimpleSizeModel(-4),
torch.randn(2, 3, 4, dtype=torch.float32),
)
]
)
def test_size_failure(self, _, module, tensor):
with self.assertRaises(IndexError):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::size"})
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class TestFullLike(utils.TorchGlowTestCase):
def test_empty_like_basic(self):
"""Basic test of the PyTorch empty_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a, dtype=torch.float)
c = torch.zeros_like(a, dtype=torch.float)
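                # b is uninitialized memory; multiplying by the zeros tensor c
                # makes the result deterministic regardless of b's contents.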
return a + (b * c)
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_empty_like_no_assign_type(self):
"""Basic test of the PyTorch empty_like Node on Glow without assigning type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a)
c = torch.zeros_like(a)
return a + (b * c)
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_empty_like_int(self):
"""Basic test of the PyTorch empty_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.empty_like(a, dtype=torch.int)
c = torch.zeros_like(a, dtype=torch.int)
return b * c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::empty_like"})
def test_full_like_basic(self):
"""Basic test of the PyTorch full_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=3.1415, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_full_like_no_assign_type(self):
"""Basic test of the PyTorch full_like Node on Glow without assigning type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=3.1415)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_full_like_int(self):
"""Basic test of the PyTorch full_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.full_like(a, fill_value=4, dtype=torch.int)
c = torch.full_like(a, fill_value=5, dtype=torch.int)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::full_like"})
def test_zeros_like_basic(self):
"""Basic test of the PyTorch zeros_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_zeros_like_no_assign_type(self):
"""Basic test of the PyTorch zeros_like Node on Glow without assign type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_zeros_like_int(self):
"""Basic test of the PyTorch zeros_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros_like(a, dtype=torch.int)
c = torch.zeros_like(b)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros_like"})
def test_ones_like_basic(self):
"""Basic test of the PyTorch ones_like Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a, dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
def test_ones_like_no_assign_type(self):
"""Basic test of the PyTorch ones_like Node on Glow without assign type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
def test_ones_like_int(self):
"""Basic test of the PyTorch ones_like Node on Glow with int type."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a, dtype=torch.int)
c = torch.ones_like(b, dtype=torch.int)
return b + c
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::ones_like"})
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class SimpleQuantizedLinearModel(torch.nn.Sequential):
def __init__(
self,
in_features,
out_features,
quantization,
per_tensor,
weight=None,
bias=None,
):
linear = torch.nn.Linear(in_features, out_features, bias=(bias is not None))
if weight:
linear.weight.data.fill_(weight)
else:
linear.weight.data.random_(0, 100)
if bias:
linear.bias.data.fill_(bias)
super(SimpleQuantizedLinearModel, self).__init__(
quantization, linear, torch.nn.quantized.DeQuantize()
)
weight_observer = (
torch.ao.quantization.default_weight_observer
if per_tensor
else torch.ao.quantization.default_per_channel_weight_observer
)
self.qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.default_observer,
weight=weight_observer,
)
torch.ao.quantization.prepare(self, inplace=True)
torch.ao.quantization.convert(self, inplace=True)
def _make_input(size, duplications, shape, dtype=torch.float):
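    """Tile range(size) `duplications` times, then reshape to `shape`."""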
tensor = torch.tensor(range(size), dtype=dtype)
tensor = torch.cat(tuple(tensor for _ in range(duplications)))
tensor = torch.reshape(tensor, shape)
return tensor
class TestQuantizedLinear(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleQuantizedLinearModel(
5,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
3.0,
),
_make_input(5, 6, [3, 2, 5]),
),
lambda: (
"no_bias",
SimpleQuantizedLinearModel(
5,
3,
torch.nn.quantized.Quantize(
scale=1 / 15, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
),
_make_input(5, 6, [3, 2, 5]),
),
lambda: (
"exclude_dq",
SimpleQuantizedLinearModel(
5,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
1.2,
3.0,
),
_make_input(5, 6, [3, 2, 5]),
{"aten::dequantize"},
),
lambda: (
"rowwise",
SimpleQuantizedLinearModel(
6,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
False, # per_tensor
),
_make_input(36, 1, [3, 2, 6]),
),
lambda: (
"tensorwise",
SimpleQuantizedLinearModel(
6,
5,
torch.nn.quantized.Quantize(
scale=1 / 25, zero_point=17, dtype=torch.quint8
),
True, # per_tensor
),
_make_input(36, 1, [3, 2, 6]),
),
]
)
def test_quantized_linear(self, _, model, tensor, fusion_blocklist=None):
fusible_ops = {
"aten::quantize_per_tensor",
"quantized::linear",
"aten::dequantize",
}
fusible_ops -= fusion_blocklist or set()
utils.compare_tracing_methods(
model, tensor, fusible_ops=fusible_ops, fusion_blocklist=fusion_blocklist
)
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import torch
from tests import utils
class TestQuantizedConv2dRelu(utils.TorchGlowTestCase):
def _test_quantized_conv2d_relu_packed(self, groups):
"""Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float) / 3
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=groups)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
            # Due to the off-by-one error, we cannot let the weights, bias, and
            # input be totally random.
conv.weight.set_(
torch.arange(36 / groups, dtype=torch.float).reshape(
[3, 3 // groups, 2, 2]
)
/ 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d_relu",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_conv2d_relu_packed_groupwise(self):
"""PyTorch groupwise quantized::conv2d_relu Node with packed weights on Glow."""
self._test_quantized_conv2d_relu_packed(groups=3)
def test_quantized_conv2d_relu_packed_nongroupwise(self):
"""PyTorch vanilla quantized::conv2d_relu Node with packed weights on Glow."""
self._test_quantized_conv2d_relu_packed(groups=1)
def test_quantized_conv2d_relu_packed_cut_q_dq(self):
"""Basic test of PyTorch quantized::conv2d_relu Node with packed weights on Glow, with quantize and dequantize excluded."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float) / 3
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=1)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
            # Due to the off-by-one error, we cannot let the weights, bias, and
            # input be totally random.
conv.weight.set_(
torch.arange(36, dtype=torch.float).reshape([3, 3, 2, 2]) / 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={"quantized::conv2d_relu"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedAddReluModule(torch.nn.Module):
def __init__(self, left_quantization, right_quantization, scale, zero_point):
super(SimpleQuantizedAddReluModule, self).__init__()
self.left_quantization = left_quantization
self.right_quantization = right_quantization
self.scale = scale
self.zero_point = zero_point
def forward(self, left, right):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.add_relu(
self.left_quantization(left),
self.right_quantization(right),
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedAddRelu(utils.TorchGlowTestCase):
def test_quantized_add_relu_zerooffset(self):
"""Basic test of the PyTorch quantized::add Node_relu on Glow with zero offset."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
0.05,
0,
),
torch.tensor([1, 2, 3, 4], dtype=torch.float32),
torch.tensor([5, 6, 7, 8], dtype=torch.float32),
skip_to_glow=True,
)
def test_quantized_add_relu(self):
"""Basic test of the PyTorch quantized::add_relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.rand([5, 5]),
torch.rand([5, 5]),
skip_to_glow=True,
)
def test_quantized_add_relu_cut_q_dq(self):
"""Basic test of the PyTorch quantized::add_relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedAddReluModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.rand([5, 5]),
torch.rand([5, 5]),
fusible_ops={"quantized::add_relu"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
# ==============================================================================
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSubtractModel(torch.nn.Module):
def __init__(self):
super(SimpleSubtractModel, self).__init__()
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).sub(b.item())
else:
c = a.sub(b)
return c.sub(c)
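# Note: a zero-dimensional tensor (b.size() == torch.Size([])) is unwrapped
# with b.item() above, so it traces to aten::sub with a Python scalar
# argument rather than a tensor-tensor subtraction.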
class TestSub(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleSubtractModel(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast_1",
SimpleSubtractModel(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_2",
SimpleSubtractModel(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast_3",
SimpleSubtractModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleSubtractModel(), torch.randn(4), torch.tensor(3.9)),
lambda: (
"int",
SimpleSubtractModel(),
torch.randn(4),
torch.tensor(20),
),
lambda: (
"int64",
SimpleSubtractModel(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_subtract(self, _, module, tensor, other):
utils.run_comparison_tests(module, (tensor, other), fusible_ops={"aten::sub"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFloorDivideModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleFloorDivideModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
b = b.item()
if self.inplace:
return (a + a).floor_divide_(b)
else:
return (a + a).floor_divide(b)
class TestFloorDiv(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
lambda: (
"inplace",
SimpleFloorDivideModule(True),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
lambda: (
"positive_float",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.tensor(3.9),
),
lambda: (
"negative_float",
SimpleFloorDivideModule(),
torch.tensor([-4.0]),
torch.tensor([3.0]),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(4, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(1, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(4, 2).random_(0, 5),
torch.Tensor(8, 3, 4, 2).random_(1, 5),
),
lambda: (
"positive_int",
SimpleFloorDivideModule(),
torch.tensor([5]),
torch.tensor([4]),
),
lambda: (
"negative_int",
SimpleFloorDivideModule(),
torch.tensor([-5]),
torch.tensor([4]),
),
lambda: (
"int64",
SimpleFloorDivideModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_floor_div(self, _, module, left, right):
utils.run_comparison_tests(
module,
(left, right),
fusible_ops={"aten::floor_divide"},
skip_for_backends="NNPI",
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSliceModel(torch.nn.Module):
def __init__(self, start, end):
super(SimpleSliceModel, self).__init__()
self.start = start
self.end = end
def forward(self, x):
x = x + x
if self.start is None and self.end is None:
return x[:]
elif self.start is None:
return x[: self.end]
elif self.end is None:
return x[self.start :]
else:
return x[self.start : self.end]
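# Note: negative start/end values index from the end of the dimension, so
# for a size-3 dimension, x[-2:2] is equivalent to x[1:2].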
class TestSlice(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (0, 1),
lambda: (0, 2),
lambda: (0, 3),
lambda: (1, 2),
lambda: (1, 3),
lambda: (2, 3),
lambda: (-3, 1),
lambda: (-2, 2),
lambda: (-1, 3),
lambda: (-2, -1),
lambda: (0, -1),
lambda: (1, -1),
lambda: (None, 2),
lambda: (None, -1),
lambda: (0, None),
lambda: (-2, None),
lambda: (None, None),
]
)
def test_slice(self, start, end):
"""Test of the PyTorch slice Node on Glow."""
input = torch.rand(3, 2, 2)
utils.compare_tracing_methods(
SimpleSliceModel(start, end), input, fusible_ops={"aten::slice"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import Optional
import torch
from tests import utils
class SimpleDivModule(torch.nn.Module):
def __init__(self, rounding_mode: Optional[str] = None):
super(SimpleDivModule, self).__init__()
self.rounding_mode = rounding_mode
def forward(self, a, b):
rounding_mode = self.rounding_mode
        if True:  # until the 3rd arg is implemented; then: rounding_mode is None
if b.size() == torch.Size([]):
return (a * a).div(b.item())
else:
c = a.div(b)
return c.div(c)
else:
if b.size() == torch.Size([]):
return (a * a).div(b.item(), rounding_mode=rounding_mode)
else:
c = a.div(b, rounding_mode=rounding_mode)
return c.div(c, rounding_mode=rounding_mode)
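# For reference, aten::div's rounding_mode argument behaves as follows:
#   rounding_mode=None     -> true division
#   rounding_mode="trunc"  -> quotient rounded toward zero
#   rounding_mode="floor"  -> quotient rounded toward negative infinity
# e.g. torch.div(torch.tensor(-7.0), torch.tensor(2.0), rounding_mode="trunc")
# gives -3.0, while rounding_mode="floor" gives -4.0.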
class TestDiv(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleDivModule(), torch.randn(4), torch.randn(4)),
lambda: (
"basic_rm_true",
SimpleDivModule(rounding_mode="true"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"basic_rm_trunc",
SimpleDivModule(rounding_mode="trunc"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"basic_rm_floor",
SimpleDivModule(rounding_mode="floor"),
torch.randn(4),
torch.randn(4),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_true",
SimpleDivModule(rounding_mode="true"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_trunc",
SimpleDivModule(rounding_mode="trunc"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast_rm_floor",
SimpleDivModule(rounding_mode="floor"),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleDivModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: (
"float_tensor",
SimpleDivModule(),
torch.randn(4),
torch.tensor(3.9),
),
lambda: (
"int_tensor",
SimpleDivModule(),
torch.tensor([4]),
torch.tensor([10]),
{"NNPI"}, # skip_for_backends
),
            # This one goes through (a * a) / b.item(), where b.item() is an integer.
lambda: (
"int_number",
SimpleDivModule(),
torch.tensor([4]),
torch.tensor(10),
{"NNPI"}, # skip_for_backends
),
lambda: (
"int64",
SimpleDivModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
{"NNPI"}, # skip_for_backends
),
]
)
def test_div(self, _, module, a, b, skip_for_backends={}):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::div"},
skip_for_backends=skip_for_backends,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTypeasModel(torch.nn.Module):
def __init__(self):
super(SimpleTypeasModel, self).__init__()
def forward(self, tensor, other=None):
# TODO: Understand and document the utility of the self-conversion test
# as well as the additional tensor + tensor step
other = tensor if other is None else other
if tensor.dtype != torch.bool:
tensor = tensor + tensor
typed = tensor.type_as(other)
return typed + typed
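# Note: tensor.type_as(other) casts tensor to other's dtype, equivalent to
# tensor.to(other.dtype).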
class TestTypeAs(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"to_int32",
SimpleTypeasModel(),
torch.randn(4),
torch.zeros(4, dtype=torch.int32),
),
lambda: (
"from_int32",
SimpleTypeasModel(),
torch.randn(4).to(dtype=torch.int32),
torch.zeros(4),
),
lambda: (
"from_bool",
SimpleTypeasModel(),
torch.randn(4).to(dtype=torch.bool),
torch.zeros(4),
),
lambda: ("self", SimpleTypeasModel(), torch.randn(4), None, False),
lambda: (
"f2f2",
SimpleTypeasModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
False,
),
lambda: (
"f2i2",
SimpleTypeasModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2).to(dtype=torch.int32),
),
]
)
def test_typeas(self, _, module, tensor, other=None, should_fuse=True):
if other is not None:
utils.compare_tracing_methods(
module,
tensor,
other,
fusible_ops={"aten::type_as"} if should_fuse else {},
)
else:
utils.compare_tracing_methods(
module, tensor, fusible_ops={"aten::type_as"} if should_fuse else {}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class CloneModel(torch.nn.Module):
def __init__(self, memory_format=torch.contiguous_format):
super(CloneModel, self).__init__()
self.memory_format = memory_format
def forward(self, a):
b = a.clone(memory_format=self.memory_format)
return b + a
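# Note: memory_format only affects the stride layout of the clone; e.g.
# cloning an NCHW tensor with torch.channels_last keeps shape [N, C, H, W]
# but stores the data in NHWC order.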
class TestClone(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1x3", [1, 3]),
lambda: ("8x3x5", [8, 3, 5]),
]
)
def test_clone(self, _, tensor_shape):
"""Test of the PyTorch clone method on Glow."""
utils.compare_tracing_methods(
CloneModel(),
torch.randn(tensor_shape),
fusible_ops={"aten::clone"},
)
@utils.deterministic_expand(
[
lambda: ("8x3x5x10", [8, 3, 5, 10]),
]
)
def test_clone_alt_memory_format(self, _, tensor_shape):
"""Test of the PyTorch clone method on Glow."""
utils.compare_tracing_methods(
CloneModel(memory_format=torch.channels_last),
torch.randn(tensor_shape),
fusible_ops={"aten::clone"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAbsModule(torch.nn.Module):
def __init__(self):
super(SimpleAbsModule, self).__init__()
def forward(self, a):
return torch.abs(a + a)
class TestAbs(utils.TorchGlowTestCase):
def test_abs_basic(self):
"""Basic test of the PyTorch Abs Node on Glow."""
x = torch.randn(10)
utils.run_comparison_tests(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
def test_abs_3d(self):
"""Test multidimensional tensor for the PyTorch Abs Node on Glow."""
x = torch.randn(2, 3, 5)
utils.run_comparison_tests(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleAddModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
return (a * a).add(b.item())
if self.inplace:
c = a.add_(b)
return c.add_(c)
else:
c = a.add(b)
return c.add(c)
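# Note: the in-place path traces to aten::add_ while the out-of-place path
# traces to aten::add; the fusible_ops check below distinguishes the two.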
class TestAdd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleAddModule(), torch.randn(4), torch.randn(4)),
lambda: ("inplace", SimpleAddModule(True), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleAddModule(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
lambda: ("float", SimpleAddModule(), torch.randn(4), torch.tensor(1.2345)),
lambda: (
"float_and_int",
SimpleAddModule(),
torch.randn(4),
torch.tensor(42),
True,
),
lambda: (
"int32",
SimpleAddModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int32),
                torch.randint(-10, 10, (2, 4), dtype=torch.int32),
),
lambda: (
"int64",
SimpleAddModule(),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
                torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_add(self, _, module, a, b, skip_to_glow=False):
utils.run_comparison_tests(
module,
(a, b),
fusible_ops={"aten::add_"} if module.inplace else {"aten::add"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleExpModule(torch.nn.Module):
def forward(self, input):
other = torch.exp(input)
return torch.exp(other)
class TestExp(utils.TorchGlowTestCase):
def test_exp_basic(self):
"""Test of the PyTorch exp Node on Glow."""
utils.compare_tracing_methods(
SimpleExpModule(), torch.randn(4), fusible_ops={"aten::exp"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool1dModule(torch.nn.Module):
def __init__(self, kernel_size, stride=None, padding=0):
super(SimpleAvgPool1dModule, self).__init__()
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
def forward(self, inputs):
return F.avg_pool1d(
inputs, self.kernel_size, padding=self.padding, stride=self.stride
)
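# For reference, avg_pool1d produces floor((L + 2 * padding - kernel_size)
# / stride) + 1 output elements per channel, with stride defaulting to
# kernel_size when not given.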
class TestAvgPool1d(utils.TorchGlowTestCase):
def test_avg_pool1d_basic(self):
"""Basic test of the PyTorch avg_pool1d Node on Glow."""
inputs = torch.randn(1, 4, 9)
utils.compare_tracing_methods(
SimpleAvgPool1dModule(3), inputs, fusible_ops={"aten::avg_pool1d"}
)
def test_avg_pool1d_with_args(self):
"""Test of the PyTorch avg_pool1d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10)
utils.compare_tracing_methods(
SimpleAvgPool1dModule(3, stride=7), inputs, fusible_ops={"aten::avg_pool1d"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleIntModule(torch.nn.Module):
def __init__(self, dtype):
super(SimpleIntModule, self).__init__()
# This has to be done in the init block, because control flow statements in the
# forward method won't be fused during scripting.
if dtype == torch.int32:
self.forward = self._int32_forward
else:
self.forward = self._int64_forward
def _int32_forward(self, a):
b = a.size(0)
c = a.size(1)
bt = torch.ops.prim.NumToTensor(b)
ct = torch.ops.prim.NumToTensor(c)
d = bt + ct
d = d.to(torch.int32)
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
def _int64_forward(self, a):
b = a.size(0)
c = a.size(1)
bt = torch.ops.prim.NumToTensor(b)
ct = torch.ops.prim.NumToTensor(c)
d = bt * ct
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
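# For reference, binding self.forward in __init__ picks one straight-line
# path before scripting; a dtype branch inside forward() would script to a
# prim::If block, which would not be fused into the Glow subgraph. A
# hypothetical minimal version of the pattern:
class _DispatchByDtypeExample(torch.nn.Module):
    def __init__(self, dtype):
        super().__init__()
        # The branch runs once here, so the scripted graph has no control flow.
        self.forward = self._int32_path if dtype == torch.int32 else self._int64_path
    def _int32_path(self, a):
        return a.to(torch.int32)
    def _int64_path(self, a):
        return a.to(torch.int64)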
class SimpleIntModuleEmptyShape(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a):
d = torch._shape_as_tensor(a)[0] # tensor with empty shape
i = torch.ops.aten.Int(d)
res = torch.ops.prim.NumToTensor(i)
return res
class TestInt(utils.TorchGlowTestCase):
def test_Int(self):
"""Basic test of the PyTorch Int Node on Glow, along with constant
propagation. Using int32 dtype, and aten::add."""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModule(torch.int32), x, fusible_ops={"aten::Int"}, scripted=True
)
def test_Int_mul_long(self):
"""Basic test of the PyTorch Int Node on Glow, along with constant
propagation. Using int64 dtype, and aten::mul"""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModule(torch.int64), x, fusible_ops={"aten::Int"}, scripted=True
)
def test_Int_empty_shape(self):
"""Basic test of the PyTorch Int Node on Glow. Input tensor has empty shape."""
x = torch.randn(2, 3, 4, dtype=torch.float32)
utils.compare_tracing_methods(
SimpleIntModuleEmptyShape(), x, fusible_ops={"aten::Int"}, scripted=True
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class TestZero(utils.TorchGlowTestCase):
def test_zero_basic(self):
"""Basic test of the PyTorch zero Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, a):
b = torch.zeros(a.size(), dtype=torch.float)
return a + b
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(TestModule(), x, fusible_ops={"aten::zeros"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import Union
import torch
from tests import utils
class SimpleCompareOpsModule(torch.nn.Module):
def __init__(self, opType):
super(SimpleCompareOpsModule, self).__init__()
self.opType = opType
def forward(self, a, b):
if self.opType == "equal":
return torch.eq(a, b + 0.1)
elif self.opType == "notEqual":
return torch.ne(a, b + 0.1)
elif self.opType == "lessThan":
return torch.lt(a, b + 0.1)
elif self.opType == "lessEqual":
return torch.le(a, b + 0.1)
elif self.opType == "greaterThan":
return torch.gt(a, b + 0.1)
elif self.opType == "greaterEqual":
return torch.ge(a, b + 0.1)
class SimpleScalarVectorCmpModule(torch.nn.Module):
def __init__(self, opType: str, rhsScalar: Union[float, int]):
super().__init__()
self.opType = opType
self.rhsScalar = rhsScalar
def forward(self, a: torch.Tensor) -> torch.Tensor:
if self.opType == "equal":
return a == self.rhsScalar
if self.opType == "greaterEqual":
return a >= self.rhsScalar
if self.opType == "greaterThan":
return a > self.rhsScalar
if self.opType == "lessEqual":
return a <= self.rhsScalar
if self.opType == "lessThan":
return a < self.rhsScalar
if self.opType == "notEqual":
return a != self.rhsScalar
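# Note: Python comparison operators on tensors trace to the corresponding
# aten ops (e.g. `a >= s` becomes aten::ge), which is what the fusible_ops
# checks below rely on.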
class TestCmp(utils.TorchGlowTestCase):
def test_equal_basic(self):
"""Basic test of the PyTorch Equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("equal"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::eq"},
)
def test_equal_bcast(self):
"""Basic test of the PyTorch Equal Node (broadcast) on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("equal"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::eq"},
)
def test_not_equal(self):
"""Basic test of the PyTorch Not equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("notEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::ne"},
)
def test_not_equal_bcast(self):
"""Basic test of the PyTorch Not equal (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("notEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::ne"},
)
def test_less_than(self):
"""Basic test of the PyTorch Less than Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessThan"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::lt"},
)
def test_less_than_bcast(self):
"""Basic test of the PyTorch Less than (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessThan"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::lt"},
)
def test_less_equal(self):
"""Basic test of the PyTorch less equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::le"},
)
def test_less_equal_bcast(self):
"""Basic test of the PyTorch less equal (Broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("lessEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::le"},
)
def test_greater_than(self):
"""Basic test of the PyTorch Greater than Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterThan"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::gt"},
)
def test_greater_than_bcast(self):
"""Basic test of the PyTorch Greater than (Broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterThan"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::gt"},
)
def test_greater_equal(self):
"""Basic test of the PyTorch Greater Equal Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterEqual"),
torch.randn(3, 4, 5),
torch.randn(3, 4, 5),
fusible_ops={"aten::ge"},
)
def test_greater_equal_bcast(self):
"""Basic test of the PyTorch Greater Equal (broadcast) Node on Glow."""
utils.compare_tracing_methods(
SimpleCompareOpsModule("greaterEqual"),
torch.randn(3, 4, 5),
torch.randn(4, 5),
fusible_ops={"aten::ge"},
)
@utils.deterministic_expand(
[
lambda: (
"eq_tensor_scalar",
"equal",
"aten::eq",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"gt_tensor_scalar",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"ge_tensor_scalar",
"greaterEqual",
"aten::ge",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"le_tensor_scalar",
"lessEqual",
"aten::le",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"lt_tensor_scalar",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"ne_tensor_scalar",
"notEqual",
"aten::ne",
torch.randn(3, 4, 5),
0.5,
),
lambda: (
"eq_tensor_scalar_int64",
"equal",
"aten::eq",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"gt_tensor_scalar_int64",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"ge_tensor_scalar_int64",
"greaterEqual",
"aten::ge",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"le_tensor_scalar_int64",
"lessEqual",
"aten::le",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"lt_tensor_scalar_int64",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"ne_tensor_scalar_int64",
"notEqual",
"aten::ne",
torch.randn(3, 4, 5).to(torch.int64),
5,
),
lambda: (
"eq_tensor_scalar_int32",
"equal",
"aten::eq",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"gt_tensor_scalar_int32",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"lt_tensor_scalar_int32",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5).to(torch.int32),
5,
),
lambda: (
"eq_tensor_scalar_float_int",
"equal",
"aten::eq",
torch.randn(3, 4, 5),
5,
),
lambda: (
"gt_tensor_scalar_float_int",
"greaterThan",
"aten::gt",
torch.randn(3, 4, 5),
5,
),
lambda: (
"lt_tensor_scalar_float_int",
"lessThan",
"aten::lt",
torch.randn(3, 4, 5),
5,
),
]
)
def test_scalar_vector_cmp(self, _, opType, op, lhsTensor, rhsScalar):
"""Testing comparisons between tensors and scalars."""
utils.compare_tracing_methods(
SimpleScalarVectorCmpModule(opType, rhsScalar),
lhsTensor,
fusible_ops={op},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SelectModule(torch.nn.Module):
def __init__(self, indices, axis, rank):
super(SelectModule, self).__init__()
self.indices = indices
self.axis = axis
self.rank = rank
def forward(self, a):
if self.rank == 2:
if self.axis == 0:
return (a + a)[self.indices[0], :]
elif self.axis == 1:
return (a + a)[:, self.indices[0]]
else:
return (a + a)[self.indices[0], self.indices[1]]
elif self.rank == 3:
if self.axis == 0:
if len(self.indices) == 1:
return (a + a)[self.indices[0], :, :]
else:
return (a + a)[self.indices[0], self.indices[1], :]
elif self.axis == 1:
if len(self.indices) == 1:
return (a + a)[:, :, self.indices[0]]
else:
return (a + a)[:, self.indices[0], self.indices[1]]
else:
if len(self.indices) == 2:
return (a + a)[self.indices[0], :, self.indices[1]]
else:
return (a + a)[self.indices[0], self.indices[1], self.indices[2]]
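# Note: integer indexing removes the indexed dimension (one aten::select
# per integer index), while `:` keeps it; e.g. for a of shape (3, 4, 5),
# a[1, :, 2] has shape (4,).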
class TestComplexSelect(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("2d_axis_0", SelectModule([1], 0, 2), torch.rand(2, 3)),
lambda: ("2d_axis_1", SelectModule([2], 1, 2), torch.rand(2, 3)),
lambda: ("2d_axis_0_1", SelectModule([0, 1], 2, 2), torch.rand(2, 3)),
lambda: ("3d_axis_0", SelectModule([0], 0, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_0_1", SelectModule([2, 1], 0, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_1", SelectModule([0], 1, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_1_2", SelectModule([2, 1], 1, 3), torch.rand(3, 4, 5)),
lambda: ("3d_axis_0_2", SelectModule([1, 3], 2, 3), torch.rand(3, 4, 5)),
lambda: (
"3d_axis_0_1_2",
SelectModule([2, 0, 4], 1, 3),
torch.rand(3, 4, 5),
),
]
)
def test_f(self, _, module, tensor):
"""Test multidimensional tensors in the PyTorch Select Node on Glow."""
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::select"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class ExpandModel(torch.nn.Module):
def __init__(self, shape):
super(ExpandModel, self).__init__()
self.shape = shape
def forward(self, a):
return a.expand(self.shape)
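# Note: expand() broadcasts singleton dimensions without copying data, -1
# keeps the existing size, and new leading dimensions may be prepended;
# e.g. torch.randn(2, 1).expand(-1, 4) has shape (2, 4).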
class TestExpand(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"unit_vector",
ExpandModel([3]),
torch.randn(1),
),
lambda: (
"unit_matrix",
ExpandModel([3, 4]),
torch.randn(1, 1),
),
lambda: (
"singleton_matrix",
ExpandModel([2, 4]),
torch.randn(2, 1),
),
lambda: (
"singleton_matrix_minus_one",
ExpandModel([-1, 4]),
torch.randn(2, 1),
),
lambda: (
"fourD",
ExpandModel([2, 4, 5, 8]),
torch.randn(2, 1, 5, 8),
),
lambda: (
"fourD_two_singleton",
ExpandModel([2, 4, 5, 8]),
torch.randn(2, 1, 5, 1),
),
lambda: (
"fourD_minus_ones",
ExpandModel([2, 4, -1, -1]),
torch.randn(2, 1, 5, 8),
),
lambda: (
"add_dim",
ExpandModel([3, 4, 2]),
torch.randn(4, 2),
),
lambda: (
"add_two_dims",
ExpandModel([8, 3, 4, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dim_minus_one",
ExpandModel([3, -1, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dim_minus_ones",
ExpandModel([3, -1, -1]),
torch.randn(4, 2),
),
lambda: (
"add_dims_minus_one",
ExpandModel([8, 3, -1, 2]),
torch.randn(4, 2),
),
lambda: (
"add_dims_minus_ones",
ExpandModel([8, 3, -1, -1]),
torch.randn(4, 2),
),
]
)
def test_expand(self, _, module, a):
"""Test of the PyTorch expand Node on Glow."""
utils.compare_tracing_methods(
module,
a,
fusible_ops={"aten::expand"},
)
class TestExpandError(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"no_singleton",
ExpandModel([3, 3]),
torch.randn(2, 2),
),
lambda: (
"shape_too_small",
ExpandModel([3]),
torch.randn(2, 2),
),
lambda: (
"invalid_zero",
ExpandModel([0, 3]),
torch.randn(1, 2),
),
lambda: (
"invalid_negative",
ExpandModel([-2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_m1",
ExpandModel([-1, 2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_zero",
ExpandModel([0, 2, 3]),
torch.randn(1, 2),
),
lambda: (
"add_dims_undefined_m2",
ExpandModel([-2, 2, 3]),
torch.randn(1, 2),
),
]
)
def test_expand_error(self, _, module, a):
"""Test of the PyTorch expand Node on Glow."""
utils.compare_tracing_methods_error(
module,
a,
fusible_ops={"aten::expand"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimplePreluModule(torch.nn.Module):
def __init__(self):
super(SimplePreluModule, self).__init__()
def forward(self, inputs, weights):
return F.prelu(inputs + inputs, weights)
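# For reference, PReLU computes max(0, x) + weight * min(0, x); a
# single-element weight (as in the test below) is shared across channels.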
class TestPrelu(utils.TorchGlowTestCase):
def test_prelu_basic(self):
"""Basic test of the PyTorch prelu Node on Glow."""
utils.compare_tracing_methods(
SimplePreluModule(),
torch.randn(1, 4, 5, 5),
torch.tensor([0.25]),
fusible_ops={"aten::prelu"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAndModule(torch.nn.Module):
def __init__(self):
super(SimpleAndModule, self).__init__()
def forward(self, a, b):
c = a & b
return torch.logical_or(c, b)
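# Note: on bool tensors `a & b` traces to aten::__and__, which is the op
# the fusible_ops check below looks for.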
class TestAnd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_and(self, _, a, b, skip_to_glow=False):
utils.run_comparison_tests(
SimpleAndModule(),
(a, b),
fusible_ops={"aten::__and__"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimplePowModule(torch.nn.Module):
def __init__(self, power):
super(SimplePowModule, self).__init__()
self.power = power
def forward(self, tensor):
return torch.pow(tensor, self.power)
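# Note: torch.pow accepts either a Python number or a broadcastable tensor
# exponent; both forms trace to aten::pow.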
class TestPow(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("float", 2.2),
lambda: ("tensor_basic", torch.randn(4) + 2),
lambda: ("tensor_size[]", torch.tensor(2.2)),
lambda: ("tensor_broadcast", torch.randn(1) + 2),
]
)
def test_pow_basic(self, _, power):
"""Test of the PyTorch pow Node on Glow."""
utils.compare_tracing_methods(
SimplePowModule(power), torch.rand(4) + 5, fusible_ops={"aten::pow"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTransposeModel(torch.nn.Module):
def __init__(self, dim0=None, dim1=None, inplace=False):
super(SimpleTransposeModel, self).__init__()
        self.dims = (dim0, dim1) if dim0 is not None and dim1 is not None else None
self.inplace = inplace
def forward(self, tensor):
t = tensor + tensor
if self.dims:
return t.transpose_(*self.dims) if self.inplace else t.transpose(*self.dims)
else:
return t.t_() if self.inplace else t.t()
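# Note: t() only transposes 2-D tensors (and is a no-op on 1-D tensors),
# while transpose(dim0, dim1) swaps an arbitrary pair of dimensions; the
# trailing-underscore variants mutate t in place.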
class TestTranspose(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("2d", SimpleTransposeModel(), torch.randn(7, 4)),
lambda: ("1d", SimpleTransposeModel(), torch.randn(7)),
lambda: ("inplace", SimpleTransposeModel(inplace=True), torch.randn(7, 4)),
]
)
def test_t(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::t"})
@utils.deterministic_expand(
[
lambda: ("simple", SimpleTransposeModel(1, 2), torch.randn(2, 3, 4)),
lambda: (
"inplace",
SimpleTransposeModel(1, 2, inplace=True),
torch.randn(2, 3, 4),
),
lambda: ("neg_dim", SimpleTransposeModel(-2, -1), torch.randn(2, 3, 4)),
]
)
def test_transpose(self, _, module, tensor, reference=None):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::transpose"})
@utils.deterministic_expand(
[lambda: ("oob_neg_dim", SimpleTransposeModel(-2, -4), torch.randn(2, 3, 4))]
)
def test_transpose_failure(self, _, module, tensor):
with self.assertRaises(IndexError):
utils.compare_tracing_methods(
module, tensor, fusible_ops={"aten::transpose"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
from tests import utils
from tests.utils import GLOW_FUSION_GROUP, SUBGRAPH_ATTR
class TestGetAttr(utils.TorchGlowTestCase):
def test_getattr(self):
"""Test fusion of the PyTorch prim::GetAttr Node into the Glow subgraph."""
with torch.no_grad():
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
x = torch.tensor([2.0, 3.0])
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
m = Model()
jit_m = torch.jit.trace(m, x)
jit_m_graph = jit_m.graph_for(x)
# Ensure all prim::GetAttrs were fused and none were left out
found_getattrs = False
for node in jit_m_graph.nodes():
kind = node.kind()
                assert (
                    kind != "prim::GetAttr"
                ), "Expected all prim::GetAttr nodes to be in the Glow subgraph"
                if kind == GLOW_FUSION_GROUP:
                    glow_subgraph = node.g(SUBGRAPH_ATTR)
                    for subgraph_node in glow_subgraph.nodes():
                        if subgraph_node.kind() == "prim::GetAttr":
                            found_getattrs = True
assert (
found_getattrs
), "Expected to find prim::GetAttrs in the Glow subgraph"
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleClampModel(torch.nn.Module):
def __init__(self, min, max):
super(SimpleClampModel, self).__init__()
self.min = min
self.max = max
def forward(self, input):
return torch.clamp(input, self.min, self.max)
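# Note: passing None for min or max clamps on one side only.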
class TestClamp(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", 0.0, 0.8, torch.float),
lambda: ("no_min", None, 0.8, torch.float),
lambda: ("no_max", 0.0, None, torch.float),
lambda: ("int_basic", 4, 8, torch.int32),
]
)
def test_clamp(self, _, min, max, dtype):
"""Test of the PyTorch clamp Node on Glow."""
a = torch.randn(2, 7)
if dtype == torch.int32:
a = torch.randint(max * 2, (2, 7))
utils.compare_tracing_methods(
SimpleClampModel(min, max), a, fusible_ops={"aten::clamp"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleContiguousModel(torch.nn.Module):
def __init__(self, memory_format=torch.contiguous_format):
super(SimpleContiguousModel, self).__init__()
self.memory_format = memory_format
def forward(self, input):
formatted = input.contiguous(memory_format=self.memory_format)
return formatted + formatted
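# Note: contiguous() returns the input unchanged when it already matches
# the requested memory_format and copies into that layout otherwise.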
class TestContiguous(utils.TorchGlowTestCase):
def test_contiguous_basic(self):
"""Test of the PyTorch contiguous Node on Glow."""
x = torch.randn(2, 2, 2)
utils.compare_tracing_methods(
SimpleContiguousModel(), x, fusible_ops={"aten::contiguous"}
)
def test_with_alternate_memory_format(self):
x = torch.randn(3, 4, 5, 6)
utils.compare_tracing_methods(
SimpleContiguousModel(torch.channels_last),
x,
fusible_ops={"aten::contiguous"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleNormModule(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(SimpleNormModule, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, tensor):
return torch.norm(tensor, *self.args, **self.kwargs)
# TODO([email protected]): uncomment the following tests
# after https://github.com/pytorch/pytorch/pull/81761 lands
# class TestNorm(utils.TorchGlowTestCase):
# def test_norm_basic(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=0, p=2),
# torch.arange(8, dtype=torch.float).reshape(2, 4),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_float_p(self):
# """Test of the PyTorch norm Node that has p=2.0 on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=0, p=2.0),
# torch.arange(8, dtype=torch.float).reshape(2, 4),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_3d_inner_axis(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=1),
# torch.arange(8, dtype=torch.float).reshape(2, 2, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_4d_outer_axis(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=[3]),
# torch.arange(16, dtype=torch.float).reshape(2, 2, 2, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
# def test_norm_keepdim(self):
# """Basic test of the PyTorch norm Node on Glow."""
# utils.compare_tracing_methods(
# SimpleNormModule(dim=[1], keepdim=True),
# torch.arange(16, dtype=torch.float).reshape(2, 4, 2),
# fusible_ops={"aten::linalg_vector_norm"},
# )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMinModule(torch.nn.Module):
def __init__(self):
super(SimpleMinModule, self).__init__()
def forward(self, a, b):
return torch.min(a + a, b + b)
class UnaryMinModule(torch.nn.Module):
def __init__(self):
super(UnaryMinModule, self).__init__()
def forward(self, a):
return torch.min(a + a)
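# Note: the binary form torch.min(a, b) is an elementwise minimum (with
# broadcasting), while the unary form reduces to the single smallest
# element of the tensor.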
class TestMin(utils.TorchGlowTestCase):
def test_elementwise_min(self):
"""Test of the PyTorch min Node on Glow."""
utils.compare_tracing_methods(
SimpleMinModule(), torch.randn(7), torch.randn(7), fusible_ops={"aten::min"}
)
def test_elementwise_min_broadcast(self):
"""Test of the PyTorch min Node with broadcast on Glow."""
utils.compare_tracing_methods(
SimpleMinModule(),
torch.randn(2, 7),
torch.randn(7),
fusible_ops={"aten::min"},
)
def test_unary_min(self):
"""Test of the PyTorch unary min Node on Glow."""
utils.compare_tracing_methods(
UnaryMinModule(),
torch.randint(
20,
(
10,
10,
),
dtype=torch.int32,
),
fusible_ops={"aten::min"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from tests import utils
from tests.utils import check_skip, DEFAULT_BACKEND
class TestEmbeddingBag(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
def test_embedding_bag_basic(self):
"""Test of aten::embedding_bag node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, input, offsets, per_sample_weights):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
embedding_sum = torch.nn.EmbeddingBag.from_pretrained(
weight, mode="sum", include_last_offset=True
)
a = embedding_sum(input, offsets)
b = embedding_sum(input, offsets, per_sample_weights)
return a, b
input = torch.LongTensor([1, 0, 0, 1, 1])
offsets = torch.LongTensor([0, 1, 5]) # final item is endOffset
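        # With include_last_offset=True, offsets has num_bags + 1 entries and
        # bag i sums rows indices[offsets[i]:offsets[i + 1]]; here the bags
        # are indices[0:1] and indices[1:5].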
per_sample_weights = torch.FloatTensor([1, 2, 3, 4, 5])
utils.compare_tracing_methods(
TestModule(),
input,
offsets,
per_sample_weights,
fusible_ops={"aten::embedding_bag"},
)
class TestQuantizedEmbeddingBag(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
@utils.deterministic_expand(
[
            # Default-argument bindings are required so that each lambda
            # captures the current loop values rather than the final ones.
lambda num_lengths=num_lengths, is4bit=is4bit, is_weighted=is_weighted, use_fp16=use_fp16, per_sample_weights_fp16=per_sample_weights_fp16: (
"{len}{bits}{weighted}{fp16}{sample_weights}{backend}".format(
len=num_lengths,
bits="_4bit" if is4bit else "_byte",
weighted="_weighted" if is_weighted else "",
fp16="_fp16" if use_fp16 else "",
sample_weights="_sample_weights_fp16"
if per_sample_weights_fp16 and is_weighted
else "",
backend="_" + DEFAULT_BACKEND,
),
num_lengths,
is4bit,
is_weighted,
use_fp16,
per_sample_weights_fp16,
)
for num_lengths in [0, 8]
for is4bit in [False, True]
for is_weighted in [False, True]
for use_fp16 in [False, True]
for per_sample_weights_fp16 in [False, True]
]
)
def test_embedding_bag_rowwise_offsets(
self,
name,
num_lengths,
is4bit,
is_weighted,
use_fp16,
per_sample_weights_fp16,
):
"""Test of quantized::embedding_bag_byte_rowwise_offsets and
quantized::embedding_bag_4bit_rowwise_offsets node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def __init__(self, q_weights, is4bit=False, per_sample_weights=None):
super().__init__()
self.q_weights = q_weights
self.per_sample_weights = per_sample_weights
if is4bit:
self.op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
else:
self.op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets
def forward(self, indices, offsets):
return self.op(
self.q_weights,
indices,
offsets,
mode=0,
per_sample_weights=self.per_sample_weights,
include_last_offset=True,
)
# generate random weights and indices
num_embeddings = 16
embedding_dim = 4
weights = torch.from_numpy(
(np.random.random_sample((num_embeddings, embedding_dim)) + 1).astype(
np.float32
)
)
q_weights = (
torch.ops.quantized.embedding_bag_4bit_prepack(weights)
if is4bit
else torch.ops.quantized.embedding_bag_byte_prepack(weights)
)
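        # Prepacking quantizes each row separately and appends that row's
        # scale and bias to the packed row, so q_weights is wider than
        # `weights`.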
np_lengths = (
np.zeros(shape=[10]).astype(np.int32)
if num_lengths == 0
else np.random.randint(0, num_lengths, size=10).astype(np.int32)
)
num_lengths = np.sum(np_lengths)
lengths = torch.from_numpy(np_lengths)
indices = torch.from_numpy(
np.random.randint(
low=0, high=num_embeddings, size=num_lengths, dtype=np.int64
)
).long()
offsets = torch.cat([torch.zeros([1]), torch.cumsum(lengths, 0)]).long()
per_sample_weights_type = (
np.float16 if per_sample_weights_fp16 and is4bit else np.float32
)
per_sample_weights = torch.from_numpy(
np.random.uniform(low=0.01, high=0.5, size=[len(indices)]).astype(
per_sample_weights_type
)
)
m = TestModule(q_weights, is4bit, per_sample_weights if is_weighted else None)
utils.compare_tracing_methods(
m,
indices,
offsets,
fusible_ops={
"quantized::embedding_bag_4bit_rowwise_offsets"
if is4bit
else "quantized::embedding_bag_byte_rowwise_offsets"
},
fp16=use_fp16,
            # The FP16 version is known to yield different results, so this
            # test mainly exercises the flow rather than exact accuracy;
            # accuracy of the lowered modules has additional coverage.
atol=0.02 if (is4bit or use_fp16) else 5e-4,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFlattenModule(torch.nn.Module):
def __init__(self, start_dim=0, end_dim=-1):
super(SimpleFlattenModule, self).__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, input):
return torch.flatten(input, start_dim=self.start_dim, end_dim=self.end_dim)
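# Note: flatten(start_dim, end_dim) collapses the inclusive range of
# dimensions into one; e.g. a (2, 3, 2, 5) input with start_dim=1,
# end_dim=2 becomes (2, 6, 5).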
class TestFlatten(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleFlattenModule(), torch.randn(2, 3, 2, 5)),
lambda: ("start_at_0", SimpleFlattenModule(0, 2), torch.randn(2, 3, 2, 5)),
lambda: (
"start_in_middle",
SimpleFlattenModule(1, 2),
torch.randn(2, 3, 2, 5),
),
lambda: (
"negative_end_dim",
SimpleFlattenModule(0, -2),
torch.randn(2, 3, 2, 5),
),
lambda: ("same_dim", SimpleFlattenModule(2, 2), torch.randn(2, 3, 2, 5)),
lambda: (
"negative_start_dim",
SimpleFlattenModule(-3, -1),
torch.randn(2, 3, 2, 5),
),
]
)
def test_flatten(self, _, module, input):
utils.compare_tracing_methods(module, input, fusible_ops={"aten::flatten"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleGeluModule(torch.nn.Module):
def forward(self, tensor):
return F.gelu(tensor + tensor)
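# For reference, GELU computes x * Phi(x) with Phi the standard normal
# CDF; the looser atol in the test below allows for approximation
# differences between implementations.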
class TestGelu(utils.TorchGlowTestCase):
def test_gelu_basic(self):
"""Basic test of the PyTorch gelu Node on Glow."""
for _ in range(100):
x = torch.randn(10)
utils.compare_tracing_methods(
SimpleGeluModule(),
x,
check_trace=False,
atol=1e-3,
fusible_ops={"aten::gelu"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBmmModule(torch.nn.Module):
def forward(self, a, b):
return (a + a).bmm(b)
class TestBmm(utils.TorchGlowTestCase):
def test_bmm(self):
"""Basic test of the PyTorch bmm Node on Glow."""
x = torch.randn(6, 4, 10)
y = torch.randn(6, 10, 2)
utils.compare_tracing_methods(
SimpleBmmModule(), x, y, fusible_ops={"aten::bmm"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleLinearModule(torch.nn.Module):
def __init__(self):
super(SimpleLinearModule, self).__init__()
def forward(self, input, weight, bias=None):
return F.linear((input + input), weight, bias)
class TestLinear(utils.TorchGlowTestCase):
def test_linear_basic(self):
"""Basic test of the PyTorch aten::linear op on Glow."""
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, fusible_ops={"aten::linear"}
)
def test_linear_bias(self):
"""Test of the PyTorch aten::linear op on Glow."""
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features)
weight = torch.randn(out_features, in_features)
bias = torch.randn(out_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, bias, fusible_ops={"aten::linear"}
)
def test_linear_broadcast(self):
"""Test of the PyTorch aten::linear op with broadcasting on Glow."""
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, 9, 7, in_features)
weight = torch.randn(out_features, in_features)
utils.compare_tracing_methods(
SimpleLinearModule(), input, weight, fusible_ops={"aten::linear"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class IndexPutModule(torch.nn.Module):
def __init__(self, indices, accumulate=False):
super(IndexPutModule, self).__init__()
self.indices = indices
self.accumulate = accumulate
def forward(self, tensor, val):
tensor.index_put_(self.indices, val, accumulate=self.accumulate)
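        # Follow the in-place write with an add so the traced graph consumes
        # the mutated tensor instead of returning an input alias.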
tensor = tensor + tensor
return tensor
class TestIndexPut(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
IndexPutModule([torch.tensor([1, 1]), torch.tensor([0, 1])]),
torch.zeros(2, 3),
torch.tensor([1.0, 2.0]),
),
lambda: (
"3d_0",
IndexPutModule(
[torch.tensor([1, 1]), torch.tensor([0, 1]), torch.tensor([0, 1])]
),
torch.zeros(2, 3, 4),
torch.tensor([1.0, 2.0]),
),
lambda: (
"3d_1",
IndexPutModule(
[
torch.tensor([1, 1, 0]),
torch.tensor([0, 1, 1]),
torch.tensor([0, 1, 0]),
]
),
torch.zeros(2, 3, 4),
torch.tensor([1.0, 2.0, 3.0]),
),
lambda: (
"broadcast_value_0",
IndexPutModule(
[
torch.tensor([2, 0, 1]),
torch.tensor([1, 2, 0]),
torch.tensor([2, 0, 1]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0]),
),
lambda: (
"broadcast_value_1",
IndexPutModule(
[
torch.tensor([1, 1, 2]),
torch.tensor([0, 1, 2]),
torch.tensor([0, 1, 3]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0]),
),
lambda: (
"broadcast_value_2",
IndexPutModule(
[
torch.tensor([1, 1, 0]),
torch.tensor([0, 1, 0]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([1.0, 1.0, 1.0, 1.0]),
),
lambda: (
"accumulate_basic",
IndexPutModule([torch.tensor([1, 2]), torch.tensor([0, 1])]),
torch.zeros(4, 3),
torch.tensor([1.0, 2.0]),
),
lambda: (
"accumulate_broadcast",
IndexPutModule(
[
torch.tensor([1, 1, 2]),
torch.tensor([0, 1, 2]),
torch.tensor([0, 1, 3]),
],
True,
),
torch.ones(5, 4, 6),
torch.tensor([5.0]),
),
lambda: (
"dim_0",
IndexPutModule(
[
torch.tensor([1]),
]
),
torch.zeros(5, 3, 4),
torch.tensor([5.0]),
),
lambda: (
"dim_1",
IndexPutModule(
[
torch.tensor([1]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([-3.0, -4.0]),
),
lambda: (
"dim_2",
IndexPutModule(
[
torch.tensor([1, 0]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([-3.0, -4.0]),
),
lambda: (
"dim_3",
IndexPutModule(
[
torch.tensor([1, 0, 2]),
]
),
torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
torch.tensor([[-3.0], [-4.0], [-5.0]]),
),
]
)
def test_index_put(self, _, module, tensor, value):
utils.compare_tracing_methods(
module, tensor, value, fusible_ops={"aten::index_put_"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleDropoutModule(torch.nn.Module):
def __init__(self, p=0.5, training=True, inplace=False):
super(SimpleDropoutModule, self).__init__()
self.p = p
self.training = training
self.inplace = inplace
def forward(self, input):
return F.dropout(
input + input, p=self.p, training=self.training, inplace=self.inplace
)
class TestDropout(utils.TorchGlowTestCase):
def test_dropout(self):
"""Basic test of the PyTorch aten::dropout Node on Glow."""
utils.compare_tracing_methods(
SimpleDropoutModule(training=False),
torch.randn(6, 4, 10),
fusible_ops={"aten::dropout"},
)
def test_dropout_inplace(self):
"""Basic test of the PyTorch aten::dropout_ Node on Glow."""
# Expect fuser to out-of-place the operator
utils.compare_tracing_methods(
SimpleDropoutModule(training=False, inplace=True),
torch.randn(6, 4, 10),
fusible_ops={"aten::dropout"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleToModel(torch.nn.Module):
def __init__(self, *conversions):
super(SimpleToModel, self).__init__()
self.conversions = conversions
def forward(self, tensor):
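        # Apply each requested conversion in turn; each conversion becomes an
        # aten::to node in the traced graph.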
for conversion_type in self.conversions:
tensor = tensor.to(conversion_type)
return tensor
class ToWithDeviceModel(torch.nn.Module):
def __init__(self, *conversions):
super(ToWithDeviceModel, self).__init__()
self.conversions = conversions
def forward(self, tensor):
for conversion_type in self.conversions:
tensor = tensor.to(device="cpu", dtype=conversion_type)
return tensor
class SimplePrimToModel(torch.nn.Module):
def __init__(self, conversion, device=None):
super().__init__()
        self.device = device
self.conversion = conversion
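        # Pick the forward overload up front: dtype-only when no device is
        # given, (device, dtype) otherwise.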
if self.device is None:
self.forward = self._forward_dtype
else:
self.forward = self._forward_device_dtype
def _forward_device_dtype(self, dummy):
return torch.ops.prim.NumToTensor(dummy.size(0)).to(
device=self.device, dtype=self.conversion
)
def _forward_dtype(self, dummy):
return torch.ops.prim.NumToTensor(dummy.size(0)).to(self.conversion)
class TestTo(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("to_int", SimpleToModel(torch.int), torch.randn(1, 2, 3, 4)),
lambda: ("to_float", SimpleToModel(torch.float), torch.randn(1, 2, 3, 4)),
lambda: (
"to_int_to_float",
SimpleToModel(torch.int, torch.float),
torch.randn(1, 2, 3, 4),
),
lambda: (
"to_int_with_device",
ToWithDeviceModel(torch.int),
torch.randn(1, 2, 3, 4),
),
lambda: ("to_cpu", SimpleToModel("cpu"), torch.randn(1, 2, 3, 4)),
lambda: (
"to_tensor",
SimpleToModel(torch.randn(3, 4).type(torch.int32)),
torch.randn(1, 2, 3, 4),
),
]
)
def test_to(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::to"})
@utils.deterministic_expand(
[
lambda: (
"to_prim_dtype",
SimplePrimToModel(torch.float),
torch.randn(5, 6, 7),
),
lambda: ("to_prim_device", SimplePrimToModel("cpu"), torch.randn(5, 6, 7)),
lambda: (
"to_prim_device_with_dtype",
SimplePrimToModel(torch.float, "cuda"),
torch.randn(5, 6, 7),
),
]
)
def test_to_prim(self, _, module, tensor):
utils.compare_tracing_methods(
module,
tensor,
fusible_ops={"prim::NumToTensor", "aten::to"},
scripted=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimplePermuteModule(torch.nn.Module):
def __init__(self, *dimensions):
super(SimplePermuteModule, self).__init__()
self.dimensions = dimensions
def forward(self, tensor):
return tensor.permute(*self.dimensions)
class TestPermute(utils.TorchGlowTestCase):
def test_permute(self):
"""Basic test of the PyTorch aten::permute node on Glow."""
utils.compare_tracing_methods(
SimplePermuteModule(0, 2, 1),
torch.randn(2, 3, 4),
fusible_ops={"aten::permute"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedCatModel(torch.nn.Module):
def __init__(self, dimension, scale, zero_point):
super(SimpleQuantizedCatModel, self).__init__()
self.dimension = dimension
self.scale = scale
self.zero_point = zero_point
def forward(self, a, b):
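        # quantized::cat writes its output at the given scale/zero_point;
        # dequantize back to float for comparison.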
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.cat(
(a, b),
dim=self.dimension,
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedCat(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"zero_offset",
SimpleQuantizedCatModel(
0,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0,
dtype=torch.quint8,
)(torch.randn([1, 2, 3, 4], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0,
dtype=torch.quint8,
)(torch.randn([5, 2, 3, 4], dtype=torch.float32)),
),
),
lambda: (
"basic",
SimpleQuantizedCatModel(
1,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0.3,
dtype=torch.quint8,
)(torch.randn([8, 8, 8, 8], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.3,
zero_point=0.3,
dtype=torch.quint8,
)(torch.randn([8, 8, 8, 8], dtype=torch.float32)),
),
),
lambda: (
"with_empty_tensor",
SimpleQuantizedCatModel(
0,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.empty(0, dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.randn([8, 8], dtype=torch.float32)),
),
),
lambda: (
"with_differing_quantizations",
SimpleQuantizedCatModel(
2,
0.05,
0,
),
(
torch.nn.quantized.Quantize(
scale=0.6,
zero_point=0.2,
dtype=torch.quint8,
)(torch.randn([7, 7, 7], dtype=torch.float32)),
torch.nn.quantized.Quantize(
scale=0.2,
zero_point=0.1,
dtype=torch.quint8,
)(torch.randn([7, 7, 7], dtype=torch.float32)),
),
),
]
)
def test_quantized_cat(self, _, module, tensors, fusion_blocklist=None):
utils.compare_tracing_methods(
module,
*tensors,
fusible_ops={"quantized::cat"},
            fusion_blocklist=fusion_blocklist,
skip_to_glow=False,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSplitModel(torch.nn.Module):
def __init__(self, split_size_or_sections, dimension):
super(SimpleSplitModel, self).__init__()
self.split_size_or_sections = split_size_or_sections
self.dimension = dimension
def forward(self, x):
return torch.split(x, self.split_size_or_sections, self.dimension)
class TestSplit(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (torch.randn(8), 4, 0),
lambda: (torch.randn(10), [1, 2, 3, 4], 0),
lambda: (torch.randn(10, 10, 10), 3, 2),
lambda: (torch.randn(100, 100), [25, 50, 25], 1),
lambda: (torch.randn(100, 100), [25, 50, 25], -2),
lambda: (torch.randn(100, 100), 25, -1),
]
)
def test_split(self, tensor, split_size_or_sections, dimension):
utils.compare_tracing_methods(
SimpleSplitModel(split_size_or_sections, dimension), tensor
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAddMmModule(torch.nn.Module):
def __init__(self, alpha=1, beta=1):
super(SimpleAddMmModule, self).__init__()
self.alpha = alpha
self.beta = beta
    def forward(self, a, b, c):
        return (a + a).addmm(b, c, beta=self.beta, alpha=self.alpha)
class TestAddMM(utils.TorchGlowTestCase):
def test_addmm_basic(self):
"""Basic test of the PyTorch addmm Node on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(),
(torch.randn(6, 4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
fp16vfp16_atol=1e-3,
fp16vfp16_rtol=1e-3,
)
def test_addmm_broadcast(self):
"""Test of the PyTorch addmm with broadcasting add on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(),
(torch.randn(4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
)
def test_addmm_broadcast_with_alpha_and_beta(self):
"""Test of the PyTorch addmm with broadcasting add on Glow."""
utils.run_comparison_tests(
SimpleAddMmModule(2.0, 3.0),
(torch.randn(4), torch.randn(6, 10), torch.randn(10, 4)),
fusible_ops={"aten::add", "aten::mm"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMmModule(torch.nn.Module):
def __init__(self):
super(SimpleMmModule, self).__init__()
def forward(self, a, b, t):
r = torch.mm(a, b)
return r.mm(t)
class TestMm(utils.TorchGlowTestCase):
def test_mm_basic(self):
"""Test of the PyTorch mm Node on Glow."""
x = torch.randn(2, 3)
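        # Transpose a (4, 3) tensor to get a (3, 4) operand so the inner
        # dimensions of the first mm match.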
y = torch.randn(4, 3).t()
t = torch.randn(4, 2)
utils.compare_tracing_methods(
SimpleMmModule(), x, y, t, fusible_ops={"aten::mm"}, skip_to_glow=True
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedMaxPoolModel(torch.nn.Module):
    def __init__(self, scale, zero_point, dtype, kernel_size):
        super(SimpleQuantizedMaxPoolModel, self).__init__()
        self.scale = scale
        self.zero_point = zero_point
        self.dtype = dtype
        self.kernel_size = kernel_size
    def forward(self, tensor):
        quantize = torch.nn.quantized.Quantize(
            scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
        )
        maxpool = torch.nn.MaxPool2d(self.kernel_size)
        dequantize = torch.nn.quantized.DeQuantize()
        return dequantize(maxpool(quantize(tensor)))
class TestQuantizedMaxPool(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleQuantizedMaxPoolModel(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
),
lambda: (
"cut_q",
SimpleQuantizedMaxPoolModel(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
{"aten::quantize_per_tensor"},
),
]
)
def test_quantized_maxpool(self, _, module, tensor, fusion_blocklist=None):
fusible_ops = {
"aten::max_pool2d",
"aten::quantize_per_tensor",
"aten::dequantize",
}
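        # Blocklisted ops are expected to stay outside the fused subgraph, so
        # drop them from the expected set.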
fusible_ops -= fusion_blocklist or set()
utils.compare_tracing_methods(
module, tensor, fusible_ops=fusible_ops, fusion_blocklist=fusion_blocklist
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAdapativeAvgPool2dModule(torch.nn.Module):
def __init__(self, output_size):
super(SimpleAdapativeAvgPool2dModule, self).__init__()
self.output_size = output_size
def forward(self, inputs):
return F.adaptive_avg_pool2d(inputs, self.output_size)
class TestAdaptiveAvgPool2d(utils.TorchGlowTestCase):
def test_adaptive_avg_pool2d_basic(self):
"""Basic test of PyTorch adaptive_avg_pool2d Node."""
inputs = torch.randn(3, 6, 14, 14)
utils.run_comparison_tests(
SimpleAdapativeAvgPool2dModule((5, 5)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
def test_adaptive_avg_pool2d_nonsquare_inputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square inputs."""
inputs = torch.randn(3, 6, 13, 14)
utils.run_comparison_tests(
SimpleAdapativeAvgPool2dModule((3, 3)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
def test_adaptive_avg_pool2d_nonsquare_outputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square outputs."""
inputs = torch.randn(3, 6, 14, 14)
utils.run_comparison_tests(
SimpleAdapativeAvgPool2dModule((5, 3)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleShapeAsTensorModel(torch.nn.Module):
def __init__(self):
super(SimpleShapeAsTensorModel, self).__init__()
def forward(self, tensor):
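        # _shape_as_tensor yields a 1-D tensor of the input's dims; add it to
        # itself so the shape tensor feeds another node.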
result = torch._shape_as_tensor(tensor)
return result + result
class TestShapeAsTensor(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("single dimension", SimpleShapeAsTensorModel(), torch.randn(6)),
lambda: (
"multiple dimensions",
SimpleShapeAsTensorModel(),
torch.randn(3, 2, 4),
),
]
)
def test_shape_as_tensor(self, _, module, tensor):
"""Test of the PyTorch ShapeAsTensor Node on Glow."""
utils.compare_tracing_methods(
module,
tensor,
fusible_ops={"aten::_shape_as_tensor"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm3D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 3D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm3d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
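        # Run in eval mode so batch_norm uses the preset running stats.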
model.eval()
inputs = torch.randn(1, num_channels, 4, 5, 5)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm3d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels, 4, 5, 5)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm2D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 2D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm2d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
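        # Run in eval mode so batch_norm uses the preset running stats.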
model.eval()
inputs = torch.randn(1, num_channels, 5, 5)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 2D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm2d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels, 5, 5)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedReluModel(torch.nn.Module):
def __init__(self, scale, zero_point, dtype):
super(SimpleQuantizedReluModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.dtype = dtype
def forward(self, tensor):
quantize = torch.nn.quantized.Quantize(
scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
)
dequantize = torch.nn.quantized.DeQuantize()
relu = torch.nn.ReLU()
return dequantize(relu(quantize(tensor)))
class TestQuantizedRelu(utils.TorchGlowTestCase):
def test_quantized_relu(self):
"""Basic test of the PyTorch quantized::relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedReluModel(1.0 / 128, 3, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::relu", "aten::quantize_per_tensor", "aten::dequantize"},
)
def test_quantized_relu_cut_dq(self):
"""Basic test of the PyTorch quantized::relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedReluModel(1.0 / 128, 3, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::relu", "aten::quantize_per_tensor"},
fusion_blocklist=["aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleRsubModel(torch.nn.Module):
def __init__(self):
super(SimpleRsubModel, self).__init__()
def forward(self, tensor, other):
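        # A 0-dim tensor exercises the Scalar overload of rsub; otherwise
        # chain two Tensor rsubs.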
if other.size() == torch.Size([]):
return torch.rsub((tensor * tensor), other.item())
else:
third = torch.rsub(tensor, other)
return torch.rsub(third, third)
class TestRsub(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleRsubModel(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
]
)
def test_rsub_as_sub(self, _, module, tensor, other):
# aten::rsub is normalized as aten::sub
utils.compare_tracing_methods(module, tensor, other, fusible_ops={"aten::sub"})
@utils.deterministic_expand(
[
lambda: ("float", SimpleRsubModel(), torch.randn(4), torch.tensor(13.293)),
lambda: ("int", SimpleRsubModel(), torch.randn(4), torch.tensor(4)),
]
)
def test_rsub(self, _, module, tensor, other):
utils.compare_tracing_methods(module, tensor, other, fusible_ops={"aten::rsub"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
from tests.utils import check_skip
class TestEmbedding(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
def test_embedding_wt_float32_ind_int64(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([1, 0, 2, 0, 1])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_wt_float16_ind_int64(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.HalfTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([1, 0, 2, 0, 1])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_2d_indices(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([[1, 2], [0, 1]])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_3d_indices(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([[[1, 2], [0, 1]]])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedMulModel(torch.nn.Module):
def __init__(
self, left_quantization, right_quantization=None, scale=None, zero_point=None
):
super(SimpleQuantizedMulModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.left_quantization = left_quantization
self.right_quantization = right_quantization or left_quantization
def forward(self, tensor, other):
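        # A 0-dim tensor takes the scalar overload of quantized::mul; the
        # tensor overload needs an explicit output scale/zero_point.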
if other.size() == torch.Size([]):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.mul(self.left_quantization(tensor), other.item())
)
else:
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.mul(
self.left_quantization(tensor),
self.right_quantization(other),
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedMul(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"zero_offset",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
0.05,
0,
),
torch.tensor([1, 2, 3, 4], dtype=torch.float32),
torch.tensor([5, 6, 7, 8], dtype=torch.float32),
),
lambda: (
"basic",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
),
lambda: (
"cut_q_dq",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=10, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_rhs",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5, 6, 6]),
torch.randn([1, 5, 1, 1]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_lhs",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5, 1, 1]),
torch.randn([1, 5, 6, 6]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_with_implicit_degenerate_dimension",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 3, 1, 1]),
torch.randn([3, 4, 4]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_outer",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5]),
torch.randn([6, 1]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"positive_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=1, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(3.14),
),
lambda: (
"negative_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=2, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(-3.14),
),
lambda: (
"zero_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=2, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(0.00),
),
lambda: (
"negative_int8_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=4, dtype=torch.qint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(-1.43),
),
]
)
def test_quantized_mul(self, _, module, tensor, other, fusion_blocklist=None):
utils.compare_tracing_methods(
module,
tensor,
other,
fusible_ops={"quantized::mul"},
fusion_blocklist=fusion_blocklist,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSqrtModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleSqrtModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
if self.inplace:
other = tensor.sqrt_()
return other.sqrt_()
else:
tensor = torch.sqrt(tensor)
return torch.sqrt(tensor)
class TestSqrt(utils.TorchGlowTestCase):
def test_sqrt_basic(self):
"""Test of the PyTorch sqrt Node on Glow."""
# Make sure the input is positive and not super close to zero.
utils.compare_tracing_methods(SimpleSqrtModel(), torch.rand(4) + 5)
def test_sqrt_inplace(self):
"""Test of the PyTorch inplace sqrt Node on Glow."""
# Make sure the input is positive and not super close to zero.
utils.compare_tracing_methods(SimpleSqrtModel(inplace=True), torch.rand(4) + 5)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleIandModule(torch.nn.Module):
def __init__(self):
super(SimpleIandModule, self).__init__()
def forward(self, a, b):
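        # In-place bitwise AND lowers to aten::__iand__ in the traced graph.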
a &= b
return torch.logical_or(a, b)
class TestIand(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_iand(self, _, a, b, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleIandModule(),
a,
b,
fusible_ops={"aten::__iand__"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSigmoidModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleSigmoidModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
if self.inplace:
other = tensor + tensor
return other.sigmoid_()
else:
other = tensor + tensor
return other.sigmoid()
class TestSigmoid(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleSigmoidModel(), torch.randn(6)),
lambda: ("inplace", SimpleSigmoidModel(inplace=True), torch.randn(6)),
]
)
def test_sigmoid(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::sigmoid"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleErfModule(torch.nn.Module):
def forward(self, input):
return torch.special.erf(input)
class TestErf(utils.TorchGlowTestCase):
def test_erf_basic(self):
"""Test of the PyTorch erf Node on Glow."""
utils.compare_tracing_methods(SimpleErfModule(), torch.randn(4))
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTanhModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleTanhModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
tensor = tensor + tensor
return tensor.tanh_() if self.inplace else tensor.tanh()
class TestTanh(utils.TorchGlowTestCase):
def test_tanh(self):
"""Basic test of the PyTorch aten::tanh Node on Glow."""
utils.compare_tracing_methods(
SimpleTanhModel(), torch.randn(4), fusible_ops={"aten::tanh"}
)
def test_tanh_inplace(self):
"""Basic test of the PyTorch aten::tanh_ Node on Glow."""
utils.compare_tracing_methods(
SimpleTanhModel(inplace=True), torch.randn(4), fusible_ops={"aten::tanh"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class ArgMinModule(torch.nn.Module):
def __init__(self, dim=None, keepDims=True):
super(ArgMinModule, self).__init__()
self.dim = dim
self.keepDims = keepDims
def forward(self, tensor):
        if self.dim is not None:
return torch.argmin(tensor, self.dim, self.keepDims)
else:
return torch.argmin(tensor)
class ArgMaxModule(torch.nn.Module):
def __init__(self, dim=None, keepDims=True):
super(ArgMaxModule, self).__init__()
self.dim = dim
self.keepDims = keepDims
def forward(self, tensor):
        if self.dim is not None:
return torch.argmax(tensor, self.dim, self.keepDims)
else:
return torch.argmax(tensor)
class TestArgMin(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", ArgMinModule(), torch.randn(4)),
lambda: ("dimensions1", ArgMinModule(1, False), torch.randn(4, 4)),
lambda: ("dimensions2", ArgMinModule(1), torch.randn(5, 5)),
]
)
def test_argmin_node(self, _, module, tensor):
"""Test of the PyTorch ArgMin node on Glow."""
utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmin"})
class TestArgMax(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", ArgMaxModule(), torch.randn(4)),
lambda: ("dimensions1", ArgMaxModule(1, False), torch.randn(4, 4)),
lambda: ("dimensions2", ArgMaxModule(1), torch.randn(5, 5)),
]
)
def test_argmax_node(self, _, module, tensor):
"""Test of the PyTorch ArgMax node on Glow."""
utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmax"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFmodModule(torch.nn.Module):
def __init__(self):
super(SimpleFmodModule, self).__init__()
def forward(self, a, b):
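        # A 0-dim divisor exercises the Scalar overload of fmod; the final
        # fmod uses a divisor of matching dtype so both operands agree.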
if b.size() == torch.Size([]):
c = a.fmod(b.item())
else:
c = a.fmod(b)
return c.fmod(torch.tensor(1.0, dtype=c.dtype))
class TestFmod(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"int64_tensor",
SimpleFmodModule(),
torch.tensor([14]),
torch.tensor([10]),
),
lambda: (
"int32_tensor",
SimpleFmodModule(),
torch.tensor([14], dtype=torch.int32),
torch.tensor([10], dtype=torch.int32),
),
lambda: (
"float_tensor",
SimpleFmodModule(),
torch.randn(4),
torch.tensor(0.3),
),
lambda: (
"basic_tensor",
SimpleFmodModule(),
torch.tensor([7.5]),
torch.tensor([2.4]),
),
lambda: (
"int_number",
SimpleFmodModule(),
torch.tensor([14]),
torch.tensor(10),
),
lambda: ("basic", SimpleFmodModule(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleFmodModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleFmodModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"positive_broadcast",
SimpleFmodModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(1, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFmodModule(),
torch.Tensor(4, 2).random_(0, 5),
torch.Tensor(8, 3, 4, 2).random_(1, 5),
),
]
)
def test_fmod(self, _, module, a, b):
utils.compare_tracing_methods(module, a, b, fusible_ops={"aten::fmod"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFloorModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.floor(c)
class TestFloor(utils.TorchGlowTestCase):
def test_floor(self):
"""Basic test of the PyTorch floor Node on Glow."""
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
utils.compare_tracing_methods(
SimpleFloorModule(), x, y, fusible_ops={"aten::floor"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseOrModule(torch.nn.Module):
    def __init__(self):
        super(SimpleBitwiseOrModule, self).__init__()
def forward(self, a, b):
c = torch.bitwise_or(a, b)
d = torch.bitwise_or(a, c)
return d
class TestBitwiseOr(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([0x01, 0x03, 0xFFFFFFF0, 0x5], dtype=torch.int32),
torch.tensor([0x02, 0x03, 0x2, 0x1F], dtype=torch.int32),
),
lambda: (
"basic_bool",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((0x1, 0x04, 0x1), dtype=torch.int32),
torch.ones((0x2, 0x1, 0x4), dtype=torch.int32),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.int32),
torch.ones((4, 5), dtype=torch.int32),
),
]
)
def test_bitwise_or(self, _, a, b, skip_to_glow=False):
"""Tests of the PyTorch Bitwise Or Node on Glow."""
        utils.compare_tracing_methods(
            SimpleBitwiseOrModule(),
            a,
            b,
            fusible_ops={"aten::bitwise_or"},
            skip_to_glow=skip_to_glow,
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleLogModule(torch.nn.Module):
    def forward(self, a):
b = torch.log(a)
return torch.log(b)
class TestLog(utils.TorchGlowTestCase):
def test_log_basic(self):
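        # The reciprocal of uniform(0, 1) keeps inputs strictly positive for log.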
x = 1 / torch.rand(3, 4, 5)
utils.compare_tracing_methods(
SimpleLogModule(),
x,
fusible_ops={"aten::log"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import torch
from tests import utils
class SimplePixelUnshuffleModel(torch.nn.Module):
def __init__(self, downscale_factor):
super(SimplePixelUnshuffleModel, self).__init__()
self.downscale_factor = downscale_factor
self.ps = torch.nn.PixelUnshuffle(self.downscale_factor)
def forward(self, tensor):
return self.ps(tensor)
class TestPixelUnshuffle(utils.TorchGlowTestCase):
def test_pixel_unshuffle(self):
"""Test of the PyTorch pixel_unshuffle Node on Glow."""
for _ in range(0, 20):
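            # Input height and width must be multiples of the downscale factor r.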
c = random.randint(1, 3)
r = random.randint(2, 5)
w = random.randint(1, 100)
h = random.randint(1, 100)
b = random.randint(1, 10)
utils.compare_tracing_methods(
SimplePixelUnshuffleModel(r),
torch.randn(b, c, w * r, h * r),
fusible_ops={"aten::pixel_unshuffle"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool2dModule(torch.nn.Module):
def __init__(self, kernel_size, stride=None, padding=0):
super(SimpleAvgPool2dModule, self).__init__()
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
def forward(self, inputs):
return F.avg_pool2d(
inputs + inputs, self.kernel_size, padding=self.padding, stride=self.stride
)
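# The `inputs + inputs` above keeps a producer node ahead of avg_pool2d in the
# traced graph for the fuser to group. Output spatial size follows the usual
# pooling formula, floor((in + 2*padding - kernel) / stride) + 1; e.g. a 5x5
# input with kernel 2 and stride 2 (stride defaults to kernel_size) pools
# down to 2x2.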
class TestAvgPool2d(utils.TorchGlowTestCase):
def test_avg_pool2d_basic(self):
"""Basic test of the PyTorch avg_pool2d Node on Glow."""
inputs = torch.randn(1, 4, 5, 5)
utils.run_comparison_tests(
SimpleAvgPool2dModule(2),
inputs,
fusible_ops={"aten::avg_pool2d"},
)
def test_avg_pool2d_with_args(self):
"""Test of the PyTorch avg_pool2d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10, 10)
utils.run_comparison_tests(
SimpleAvgPool2dModule(3, stride=7),
inputs,
fusible_ops={"aten::avg_pool2d"},
fp16vfp16_atol=1e-3,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool3dModule(torch.nn.Module):
    def __init__(self, kernel_size, stride=None, padding=0):
        super(SimpleAvgPool3dModule, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
    def forward(self, inputs):
        return F.avg_pool3d(
            inputs, self.kernel_size, padding=self.padding, stride=self.stride
        )
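# Same pooling-size sketch in 3-D: floor((in + 2*padding - kernel) / stride) + 1
# per spatial dim; e.g. the 10x10x10 input below with kernel 3 and stride
# (4, 7, 7) pools to 2x2x2.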
class TestAvgPool3d(utils.TorchGlowTestCase):
def test_avg_pool3d_basic(self):
"""Basic test of the PyTorch avg_pool3d Node on Glow."""
inputs = torch.randn(1, 4, 5, 5, 5)
utils.run_comparison_tests(
SimpleAvgPool3dModule(3), inputs, fusible_ops={"aten::avg_pool3d"}
)
def test_avg_pool3d_with_args(self):
"""Test of the PyTorch avg_pool3d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10, 10, 10)
utils.run_comparison_tests(
SimpleAvgPool3dModule(3, (4, 7, 7)),
inputs,
fusible_ops={"aten::avg_pool3d"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedLeakyReluModel(torch.nn.Module):
def __init__(self, scale, zero_point, dtype):
super(SimpleQuantizedLeakyReluModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.dtype = dtype
def forward(self, tensor):
quantize = torch.nn.quantized.Quantize(
scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
)
dequantize = torch.nn.quantized.DeQuantize()
leaky_relu = torch.nn.LeakyReLU()
return dequantize(leaky_relu(quantize(tensor)))
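# Quantization math sketch (assumed affine quint8 semantics): quantize maps
# x to clamp(round(x / scale) + zero_point, 0, 255), and dequantize maps q
# back to (q - zero_point) * scale. With scale=0.3 and zero_point=0 as used
# below, x = 0.9 quantizes to q = 3 and dequantizes back to 0.9 (up to float
# rounding).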
class TestQuantizedLeakyRelu(utils.TorchGlowTestCase):
def test_quantized_leaky_relu(self):
"""Basic test of the PyTorch quantized::leaky_relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedLeakyReluModel(0.3, 0, torch.quint8),
torch.randn([5, 5]),
fusible_ops={
"aten::leaky_relu",
"aten::quantize_per_tensor",
"aten::dequantize",
},
)
def test_quantized_leaky_relu_cut_dq(self):
"""Basic test of the PyTorch quantized::leaky_relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedLeakyReluModel(0.3, 0, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::leaky_relu", "aten::quantize_per_tensor"},
fusion_blocklist=["aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleCatModule(torch.nn.Module):
def __init__(self, *dimensions):
super(SimpleCatModule, self).__init__()
self.dimensions = dimensions
def forward(self, a, b, c):
other = torch.cat((a, b, c), self.dimensions[0])
for dimension in self.dimensions[1:]:
other = torch.cat((other, other), dimension)
return other
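# Shape walk-through for SimpleCatModule(0, 1, 2) on three (2, 3, 4) inputs:
# cat along dim 0 gives (6, 3, 4); self-concat along dim 1 gives (6, 6, 4);
# self-concat along dim 2 gives (6, 6, 8).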
class TestCat(utils.TorchGlowTestCase):
def test_cat_with_empty_tensor(self):
"""Basic test of the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.empty(0),
torch.randn(2, 3, 4, 5),
torch.randn(2, 3, 4, 5),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_basic(self):
"""Basic test of the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_neg_dim(self):
"""Test negative dimension index for the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(-3, -2, -1),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_oob_neg_dim(self):
"""Test out of bounds negative dimension index for the PyTorch cat Node on Glow."""
with self.assertRaises(IndexError):
utils.compare_tracing_methods(
SimpleCatModule(-4, -2, -1),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_with_different_types(self):
"""Test cat between different types that can be cast, which is supported in pytorch."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4, dtype=torch.half),
torch.randn(2, 3, 4, dtype=torch.half),
fusible_ops={"prim::FusedConcat"},
)
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4).to(torch.int),
torch.randn(2, 3, 4).to(torch.long),
torch.randn(2, 3, 4).to(torch.long),
fusible_ops={"prim::FusedConcat"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleStackModel(torch.nn.Module):
def __init__(self, dim):
super(SimpleStackModel, self).__init__()
self.dim = dim
def forward(self, a, b):
c = b + b
return torch.stack((a, c), dim=self.dim)
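# Unlike cat, stack inserts a brand-new axis of size 2 here: stacking two
# (2, 3, 4) tensors at dim d yields (2, 2, 3, 4), (2, 2, 3, 4), (2, 3, 2, 4),
# or (2, 3, 4, 2) for d = 0..3, with the new axis at position d.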
class TestStack(utils.TorchGlowTestCase):
def test_stack_basic(self):
"""Basic test of the PyTorch aten::stack Node on Glow."""
for d in range(0, 4):
utils.compare_tracing_methods(
SimpleStackModel(d),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
skip_to_glow=True,
)
def test_stack_different_types(self):
"""Test stack between fp16 and fp32, which is supported in pytorch."""
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4, dtype=torch.half)
for d in range(0, 4):
utils.compare_tracing_methods(
SimpleStackModel(d),
x,
y,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleConvTranspose2dModule(torch.nn.Module):
def __init__(self, stride=1, padding=0, output_padding=0, dilation=1, groups=1):
super(SimpleConvTranspose2dModule, self).__init__()
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.groups = groups
self.dilation = dilation
def forward(self, inputs, filters, bias=None):
convTranspose = F.conv_transpose2d(
inputs,
filters,
bias=bias,
stride=self.stride,
padding=self.padding,
output_padding=self.output_padding,
groups=self.groups,
dilation=self.dilation,
)
return F.relu(convTranspose)
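# Output-size sketch (standard conv_transpose2d formula):
#   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1)
#         + output_padding + 1
# With the defaults above plus padding=1 as used below, a 5x5 input and a 3x3
# kernel give a 5x5 output. Note that the weight layout for conv_transpose2d
# is (in_channels, out_channels / groups, kH, kW), so filters of shape
# (4, 8, 3, 3) map 4 input channels to 8 output channels.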
class TestConvTranspose2d(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleConvTranspose2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(4, 8, 3, 3),
),
lambda: (
"with_bias",
SimpleConvTranspose2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(4, 8, 3, 3),
torch.randn(4),
),
]
)
    def test_convTranspose2d(self, _, module, inputs, filters, bias=None):
        """Basic test of the PyTorch conv_transpose2d Node on Glow."""
        # Forward the bias only when the case provides one, so that the
        # "with_bias" case actually exercises the bias input.
        trace_inputs = (inputs, filters) if bias is None else (inputs, filters, bias)
        utils.compare_tracing_methods(
            module, *trace_inputs, fusible_ops={"aten::_convolution"}
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleReluModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleReluModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
other = F.relu(tensor, inplace=self.inplace)
return F.relu(other, inplace=self.inplace)
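# Behavior sketch: F.relu clamps negatives to zero, e.g. [-2.0, 3.0] maps to
# [0.0, 3.0]. With inplace=True the traced graph records aten::relu_ instead
# of aten::relu, which the tests below key their fusible_ops on.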
class TestRelu(utils.TorchGlowTestCase):
def test_relu_basic(self):
"""Basic test of the PyTorch relu Node on Glow."""
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
utils.compare_tracing_methods(SimpleReluModel(), x, fusible_ops={"aten::relu"})
def test_relu_inplace(self):
"""Test of the PyTorch relu_ Node on Glow."""
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
utils.compare_tracing_methods(
SimpleReluModel(inplace=True), x, fusible_ops={"aten::relu_"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleArgSortModule(torch.nn.Module):
def __init__(self, descending=True):
super(SimpleArgSortModule, self).__init__()
self.descending = descending
def forward(self, inputs):
        # Only the last dim is currently supported
return torch.argsort(inputs, dim=-1, descending=self.descending)
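# Example of the expected indices (assumed torch.argsort semantics):
#   torch.argsort(torch.tensor([3.0, 1.0, 2.0]), descending=True)
#   # => tensor([0, 2, 1]), i.e. the positions that would sort the values.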
class TestArgSort(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"desc",
SimpleArgSortModule(),
torch.randn(4),
),
lambda: (
"asc",
SimpleArgSortModule(descending=False),
torch.randn(4),
),
lambda: (
"2d_desc",
SimpleArgSortModule(),
torch.randn(4, 3),
),
lambda: (
"3d_asc",
SimpleArgSortModule(descending=False),
torch.randn(6, 4, 5),
),
lambda: (
"4d_desc",
SimpleArgSortModule(),
torch.randn(4, 7, 7, 3),
),
]
)
def test_argsort(self, _, module, a):
utils.compare_tracing_methods(module, a, fusible_ops={"aten::argsort"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleViewModule(torch.nn.Module):
def __init__(self, *shape):
super(SimpleViewModule, self).__init__()
self.shape = shape
def forward(self, tensor):
return (tensor + tensor).view(self.shape)
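# The `tensor + tensor` keeps a producer node ahead of view in the traced
# graph. A -1 in the shape is inferred from the remaining dims, so viewing a
# (2, 3, 4) tensor as (2, -1) yields (2, 12) and as (-1, 2) yields (12, 2).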
class TestView(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (SimpleViewModule(2, -1), torch.rand(2, 3, 4)),
lambda: (SimpleViewModule(-1, 2), torch.rand(2, 3, 4)),
]
)
def test_simple(self, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::view"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleReshapeModel(torch.nn.Module):
def __init__(self, shape):
super(SimpleReshapeModel, self).__init__()
self.shape = shape
def forward(self, tensor):
combined = tensor + tensor
return combined.reshape(self.shape)
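# reshape is the copy-tolerant sibling of view: it returns a view when the
# data is contiguous and silently copies otherwise. [2, -1] on the (2, 3, 4)
# input below again infers (2, 12).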
class TestReshape(utils.TorchGlowTestCase):
def test_reshape(self):
"""Test of the PyTorch reshape Node on Glow."""
utils.compare_tracing_methods(
SimpleReshapeModel([2, -1]),
torch.rand(2, 3, 4),
fusible_ops={"aten::reshape"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseXorModule(torch.nn.Module):
def __init__(self, dtype=None):
super(SimpleBitwiseXorModule, self).__init__()
self.dtype = dtype
def forward(self, a, b):
c = torch.bitwise_xor(a, b)
d = torch.bitwise_xor(a, c)
return d
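# A sketch of the eager semantics traced above: c = a ^ b and d = a ^ c, so
# d == a ^ (a ^ b) == b elementwise (XOR with the same value twice cancels).
# The chained call mainly gives the fuser two aten::bitwise_xor nodes.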
class TestBitwiseXor(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([0x01, 0x02, 0x03, 0x04], dtype=torch.int32),
torch.tensor([0x03, 0x07, 0xFF, 0x0100], dtype=torch.int32),
),
lambda: (
"basic_64",
torch.tensor([0x01, 0x03, 0xFFFFFFFFFF00, 0x5], dtype=torch.int64),
torch.tensor([0x02, 0x03, 0x2, 0x1F], dtype=torch.int64),
),
lambda: (
"basic_bool",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((0x1, 0x3, 0x6), dtype=torch.int32),
torch.ones((0x3, 0x1, 0x6), dtype=torch.int32),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.int32),
torch.ones((4, 5), dtype=torch.int32),
),
]
)
def test_bitwise_xor(self, _, a, b, skip_to_glow=False):
"""Tests of the PyTorch Bitwise Xor Node on Glow."""
utils.compare_tracing_methods(
SimpleBitwiseXorModule(),
a,
b,
fusible_ops={"aten::bitwise_xor"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleLayerNormModule(torch.nn.Module):
def __init__(self, normalized_shape):
super(SimpleLayerNormModule, self).__init__()
self.normalized_shape = normalized_shape
def forward(self, input, weight=None, bias=None):
return F.layer_norm(input, self.normalized_shape, weight, bias)
class OneLayerModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.normalized_shape = (6,)
self.weight = torch.Tensor([2.4180, 2.2070, 2.3184, 0.7378, 0.7734, 0.7520])
self.bias = torch.Tensor([0.1567, 0.0308, 0.0166, 0.2944, 0.2759, 0.5649])
self.eps = 1e-5
def forward(self, tensor):
return F.layer_norm(
tensor, self.normalized_shape, self.weight, self.bias, self.eps
)
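# LayerNorm math sketch over the normalized_shape dims:
#   y = (x - mean(x)) / sqrt(var(x) + eps) * weight + bias
# where mean and (biased) var are computed per sample over the last
# len(normalized_shape) dimensions, here the final axis of size 6.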
class LayerNormNHCWLayout(torch.nn.Module):
def __init__(self, normalized_shape, stride=1, padding=0, dilation=1, groups=1):
super(LayerNormNHCWLayout, self).__init__()
self.normalized_shape = normalized_shape
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, inputs, filters, biasConv=None, weight=None, bias=None):
conv = F.conv2d(
inputs,
filters,
bias=biasConv,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
return F.layer_norm(
conv.permute(0, 2, 1, 3), self.normalized_shape, weight, bias
)
class LayerNormNHCWLayoutWithConvAfter(torch.nn.Module):
def __init__(self, normalized_shape, stride=1, padding=0, dilation=1, groups=1):
super(LayerNormNHCWLayoutWithConvAfter, self).__init__()
self.normalized_shape = normalized_shape
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(
self,
inputs,
filters,
filters2,
biasConv=None,
biasConv2=None,
weight=None,
bias=None,
):
conv = F.conv2d(
inputs,
filters,
bias=biasConv,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
t = F.layer_norm(conv.permute(0, 2, 1, 3), self.normalized_shape, weight, bias)
return F.conv2d(
t.permute(0, 2, 1, 3),
filters2,
biasConv2,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
class TestLayerNorm(utils.TorchGlowTestCase):
def test_layernorm_basic(self):
"""Basic test of the PyTorch layernorm Node on Glow."""
inputs = torch.randn(1, 4, 5, 5)
weight = torch.randn(5)
bias = torch.randn(5)
utils.compare_tracing_methods(
SimpleLayerNormModule([5]),
inputs,
weight,
bias,
fusible_ops={"aten::layer_norm"},
)
def test_layernorm_no_bias(self):
"""Test of the PyTorch aten::layer_norm without weights and bias."""
inputs = torch.randn(1, 4, 5, 5)
utils.compare_tracing_methods(
SimpleLayerNormModule([5, 5]), inputs, fusible_ops={"aten::layer_norm"}
)
def test_layernorm_layout(self):
"""Test of the PyTorch aten::layer_norm with NHCW layout."""
inputs = torch.randn(1, 6, 5, 6)
kernel = torch.randn(3, 6, 2, 2)
        # This unit test builds a graph like conv => permute => layer_norm.
        # Since Glow always assumes a 4-dim input tensor to be NCHW, after
        # the permutation the layout of layer_norm's input would be NHCW,
        # which is not a supported layout; we mitigate this by setting
        # accept_all_layouts to true.
utils.compare_tracing_methods(
LayerNormNHCWLayout([5]),
inputs,
kernel,
fusible_ops={"aten::layer_norm", "aten::permute", "aten::_convolution"},
accept_all_layouts=True,
)
def test_layernorm_layout_with_conv_after(self):
"""Test of the PyTorch aten::layer_norm with NHCW layout and conv after
layer_norm."""
inputs = torch.randn(1, 8, 5, 6)
kernel = torch.randn(4, 8, 2, 2)
kernel2 = torch.randn(2, 4, 2, 2)
        # This unit test builds a graph like conv => permute => layer_norm
        # => conv. Since Glow always assumes a 4-dim input tensor to be NCHW,
        # after the permutation the layout of layer_norm's input would be
        # NHCW. Even if we simply ignored the layout check for layer_norm,
        # the second conv would still complain about a layout mismatch. We
        # mitigate this by setting accept_all_layouts to true.
utils.compare_tracing_methods(
LayerNormNHCWLayoutWithConvAfter([5]),
inputs,
kernel,
kernel2,
fusible_ops={"aten::layer_norm", "aten::permute", "aten::_convolution"},
accept_all_layouts=True,
)
def test_layernorm_output(self):
"""Test with mock inputs to verify that output is as expected"""
module = OneLayerModule()
module_inputs = (torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]),)
loc_out = module(*module_inputs)
expected_output = torch.tensor(
[
[
-3.382883310317993,
-1.907626986503601,
-0.662156879901886,
0.5104053020477295,
0.9551836252212524,
1.6657130718231201,
]
]
)
tolerance = 1e-04
assert tolerance > torch.norm(loc_out - expected_output)
|