# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAdaptiveAvgPool2dModule(torch.nn.Module):
    def __init__(self, output_size):
        super(SimpleAdaptiveAvgPool2dModule, self).__init__()
self.output_size = output_size
def forward(self, inputs):
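        # adaptive_avg_pool2d pools the spatial (H, W) dims down to exactly
        # self.output_size, deriving the window sizes from the input shape.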
return F.adaptive_avg_pool2d(inputs, self.output_size)
class TestAdaptiveAvgPool2d(utils.TorchGlowTestCase):
def test_adaptive_avg_pool2d_basic(self):
"""Basic test of PyTorch adaptive_avg_pool2d Node."""
inputs = torch.randn(3, 6, 14, 14)
utils.run_comparison_tests(
            SimpleAdaptiveAvgPool2dModule((5, 5)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
def test_adaptive_avg_pool2d_nonsquare_inputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square inputs."""
inputs = torch.randn(3, 6, 13, 14)
utils.run_comparison_tests(
            SimpleAdaptiveAvgPool2dModule((3, 3)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
def test_adaptive_avg_pool2d_nonsquare_outputs(self):
"""Test of PyTorch adaptive_avg_pool2d Node with non-square outputs."""
inputs = torch.randn(3, 6, 14, 14)
utils.run_comparison_tests(
            SimpleAdaptiveAvgPool2dModule((5, 3)),
inputs,
fusible_ops={"aten::adaptive_avg_pool2d"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleShapeAsTensorModel(torch.nn.Module):
def __init__(self):
super(SimpleShapeAsTensorModel, self).__init__()
def forward(self, tensor):
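        # torch._shape_as_tensor returns the shape as a 1-D int64 tensor;
        # result + result therefore yields each dimension size doubled.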
result = torch._shape_as_tensor(tensor)
return result + result
class TestShapeAsTensor(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("single dimension", SimpleShapeAsTensorModel(), torch.randn(6)),
lambda: (
"multiple dimensions",
SimpleShapeAsTensorModel(),
torch.randn(3, 2, 4),
),
]
)
def test_shape_as_tensor(self, _, module, tensor):
"""Test of the PyTorch ShapeAsTensor Node on Glow."""
utils.compare_tracing_methods(
module,
tensor,
fusible_ops={"aten::_shape_as_tensor"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm3D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 3D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm3d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
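        # eval() is required so batch_norm normalizes with the preset
        # running_mean/running_var instead of per-batch statistics.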
model.eval()
inputs = torch.randn(1, num_channels, 4, 5, 5)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm3d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels, 4, 5, 5)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm2D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 2D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm2d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
model.eval()
inputs = torch.randn(1, num_channels, 5, 5)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 2D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm2d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels, 5, 5)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedReluModel(torch.nn.Module):
def __init__(self, scale, zero_point, dtype):
super(SimpleQuantizedReluModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.dtype = dtype
def forward(self, tensor):
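        # quantize -> relu -> dequantize: the float input is quantized to
        # self.dtype, passed through ReLU in the quantized domain, and
        # dequantized back to float.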
quantize = torch.nn.quantized.Quantize(
scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
)
dequantize = torch.nn.quantized.DeQuantize()
relu = torch.nn.ReLU()
return dequantize(relu(quantize(tensor)))
class TestQuantizedRelu(utils.TorchGlowTestCase):
def test_quantized_relu(self):
"""Basic test of the PyTorch quantized::relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedReluModel(1.0 / 128, 3, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::relu", "aten::quantize_per_tensor", "aten::dequantize"},
)
def test_quantized_relu_cut_dq(self):
"""Basic test of the PyTorch quantized::relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedReluModel(1.0 / 128, 3, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::relu", "aten::quantize_per_tensor"},
fusion_blocklist=["aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleRsubModel(torch.nn.Module):
def __init__(self):
super(SimpleRsubModel, self).__init__()
def forward(self, tensor, other):
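        # torch.rsub(input, other) computes other - input, i.e. sub with
        # the operand order reversed.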
if other.size() == torch.Size([]):
return torch.rsub((tensor * tensor), other.item())
else:
third = torch.rsub(tensor, other)
return torch.rsub(third, third)
class TestRsub(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleRsubModel(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"broadcast",
SimpleRsubModel(),
torch.randn(4, 2),
torch.randn(8, 3, 4, 2),
),
]
)
def test_rsub_as_sub(self, _, module, tensor, other):
# aten::rsub is normalized as aten::sub
utils.compare_tracing_methods(module, tensor, other, fusible_ops={"aten::sub"})
@utils.deterministic_expand(
[
lambda: ("float", SimpleRsubModel(), torch.randn(4), torch.tensor(13.293)),
lambda: ("int", SimpleRsubModel(), torch.randn(4), torch.tensor(4)),
]
)
def test_rsub(self, _, module, tensor, other):
utils.compare_tracing_methods(module, tensor, other, fusible_ops={"aten::rsub"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
from tests.utils import check_skip
class TestEmbedding(utils.TorchGlowTestCase):
supported_backends = {"Interpreter", "NNPI"}
def test_embedding_wt_float32_ind_int64(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
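                # Embedding.from_pretrained builds a frozen lookup table;
                # the result has shape indices.shape + (embedding_dim,).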
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([1, 0, 2, 0, 1])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_wt_float16_ind_int64(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.HalfTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([1, 0, 2, 0, 1])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_2d_indices(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([[1, 2], [0, 1]])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
def test_embedding_3d_indices(self):
"""Test of aten::embedding node on glow"""
check_skip(self)
class TestModule(torch.nn.Module):
def forward(self, indices):
weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3], [3.3, -4, 5.7]])
embedding = torch.nn.Embedding.from_pretrained(weight)
a = embedding(indices)
return a
indices = torch.LongTensor([[[1, 2], [0, 1]]])
utils.compare_tracing_methods(
TestModule(),
indices,
fusible_ops={"aten::embedding"},
skip_to_glow=True, # to_glow doesn't support include_last_offset=False
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedMulModel(torch.nn.Module):
def __init__(
self, left_quantization, right_quantization=None, scale=None, zero_point=None
):
super(SimpleQuantizedMulModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.left_quantization = left_quantization
self.right_quantization = right_quantization or left_quantization
def forward(self, tensor, other):
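        # A 0-dim `other` exercises the scalar overload of quantized::mul;
        # otherwise both operands are quantized and multiplied with an
        # explicit output scale and zero_point.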
if other.size() == torch.Size([]):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.mul(self.left_quantization(tensor), other.item())
)
else:
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.mul(
self.left_quantization(tensor),
self.right_quantization(other),
scale=self.scale,
zero_point=self.zero_point,
)
)
class TestQuantizedMul(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"zero_offset",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
0.05,
0,
),
torch.tensor([1, 2, 3, 4], dtype=torch.float32),
torch.tensor([5, 6, 7, 8], dtype=torch.float32),
),
lambda: (
"basic",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
),
lambda: (
"cut_q_dq",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=10, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_rhs",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5, 6, 6]),
torch.randn([1, 5, 1, 1]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_lhs",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5, 1, 1]),
torch.randn([1, 5, 6, 6]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_with_implicit_degenerate_dimension",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 3, 1, 1]),
torch.randn([3, 4, 4]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"broadcast_outer",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.2, zero_point=1, dtype=torch.quint8
),
0.2,
3,
),
torch.randn([1, 5]),
torch.randn([6, 1]),
["aten::quantize_per_tensor", "aten::dequantize"],
),
lambda: (
"positive_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=1, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(3.14),
),
lambda: (
"negative_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=2, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(-3.14),
),
lambda: (
"zero_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=2, dtype=torch.quint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(0.00),
),
lambda: (
"negative_int8_scalar",
SimpleQuantizedMulModel(
torch.nn.quantized.Quantize(
scale=0.05, zero_point=4, dtype=torch.qint8
),
),
torch.randn(1, 2, 3, 4),
torch.tensor(-1.43),
),
]
)
def test_quantized_mul(self, _, module, tensor, other, fusion_blocklist=None):
utils.compare_tracing_methods(
module,
tensor,
other,
fusible_ops={"quantized::mul"},
fusion_blocklist=fusion_blocklist,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSqrtModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleSqrtModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
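        # Two chained sqrts compute the fourth root; the inplace variant
        # mutates the input tensor via sqrt_().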
if self.inplace:
other = tensor.sqrt_()
return other.sqrt_()
else:
tensor = torch.sqrt(tensor)
return torch.sqrt(tensor)
class TestSqrt(utils.TorchGlowTestCase):
def test_sqrt_basic(self):
"""Test of the PyTorch sqrt Node on Glow."""
# Make sure the input is positive and not super close to zero.
utils.compare_tracing_methods(SimpleSqrtModel(), torch.rand(4) + 5)
def test_sqrt_inplace(self):
"""Test of the PyTorch inplace sqrt Node on Glow."""
# Make sure the input is positive and not super close to zero.
utils.compare_tracing_methods(SimpleSqrtModel(inplace=True), torch.rand(4) + 5)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleIandModule(torch.nn.Module):
def __init__(self):
super(SimpleIandModule, self).__init__()
def forward(self, a, b):
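        # a &= b lowers to aten::__iand__; the logical_or consumes the
        # mutated value so the in-place result feeds a follow-up op.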
a &= b
return torch.logical_or(a, b)
class TestIand(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_iand(self, _, a, b, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleIandModule(),
a,
b,
fusible_ops={"aten::__iand__"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSigmoidModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleSigmoidModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
if self.inplace:
other = tensor + tensor
return other.sigmoid_()
else:
other = tensor + tensor
return other.sigmoid()
class TestSigmoid(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleSigmoidModel(), torch.randn(6)),
lambda: ("inplace", SimpleSigmoidModel(inplace=True), torch.randn(6)),
]
)
def test_sigmoid(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::sigmoid"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleErfModule(torch.nn.Module):
def forward(self, input):
return torch.special.erf(input)
class TestErf(utils.TorchGlowTestCase):
def test_erf_basic(self):
"""Test of the PyTorch erf Node on Glow."""
utils.compare_tracing_methods(SimpleErfModule(), torch.randn(4))
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTanhModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleTanhModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
tensor = tensor + tensor
return tensor.tanh_() if self.inplace else tensor.tanh()
class TestTanh(utils.TorchGlowTestCase):
def test_tanh(self):
"""Basic test of the PyTorch aten::tanh Node on Glow."""
utils.compare_tracing_methods(
SimpleTanhModel(), torch.randn(4), fusible_ops={"aten::tanh"}
)
def test_tanh_inplace(self):
"""Basic test of the PyTorch aten::tanh_ Node on Glow."""
utils.compare_tracing_methods(
SimpleTanhModel(inplace=True), torch.randn(4), fusible_ops={"aten::tanh"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class ArgMinModule(torch.nn.Module):
def __init__(self, dim=None, keepDims=True):
super(ArgMinModule, self).__init__()
self.dim = dim
self.keepDims = keepDims
def forward(self, tensor):
        if self.dim is not None:
return torch.argmin(tensor, self.dim, self.keepDims)
else:
return torch.argmin(tensor)
class ArgMaxModule(torch.nn.Module):
def __init__(self, dim=None, keepDims=True):
super(ArgMaxModule, self).__init__()
self.dim = dim
self.keepDims = keepDims
def forward(self, tensor):
        if self.dim is not None:
return torch.argmax(tensor, self.dim, self.keepDims)
else:
return torch.argmax(tensor)
class TestArgMin(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", ArgMinModule(), torch.randn(4)),
lambda: ("dimensions1", ArgMinModule(1, False), torch.randn(4, 4)),
lambda: ("dimensions2", ArgMinModule(1), torch.randn(5, 5)),
]
)
def test_argmin_node(self, _, module, tensor):
"""Test of the PyTorch ArgMin node on Glow."""
utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmin"})
class TestArgMax(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", ArgMaxModule(), torch.randn(4)),
lambda: ("dimensions1", ArgMaxModule(1, False), torch.randn(4, 4)),
lambda: ("dimensions2", ArgMaxModule(1), torch.randn(5, 5)),
]
)
def test_argmax_node(self, _, module, tensor):
"""Test of the PyTorch ArgMax node on Glow."""
utils.run_comparison_tests(module, tensor, fusible_ops={"aten::argmax"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFmodModule(torch.nn.Module):
def __init__(self):
super(SimpleFmodModule, self).__init__()
def forward(self, a, b):
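        # A 0-dim divisor exercises the scalar overload of aten::fmod; the
        # trailing fmod by 1.0 keeps only the (signed) fractional part.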
if b.size() == torch.Size([]):
c = a.fmod(b.item())
else:
c = a.fmod(b)
return c.fmod(torch.tensor(1.0, dtype=c.dtype))
class TestFmod(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"int64_tensor",
SimpleFmodModule(),
torch.tensor([14]),
torch.tensor([10]),
),
lambda: (
"int32_tensor",
SimpleFmodModule(),
torch.tensor([14], dtype=torch.int32),
torch.tensor([10], dtype=torch.int32),
),
lambda: (
"float_tensor",
SimpleFmodModule(),
torch.randn(4),
torch.tensor(0.3),
),
lambda: (
"basic_tensor",
SimpleFmodModule(),
torch.tensor([7.5]),
torch.tensor([2.4]),
),
lambda: (
"int_number",
SimpleFmodModule(),
torch.tensor([14]),
torch.tensor(10),
),
lambda: ("basic", SimpleFmodModule(), torch.randn(4), torch.randn(4)),
lambda: (
"broadcast",
SimpleFmodModule(),
torch.randn(8, 3, 4, 2),
torch.randn(4, 2),
),
lambda: (
"broadcast",
SimpleFmodModule(),
torch.randn(8, 3, 4, 2),
torch.randn(1, 2),
),
lambda: (
"positive_broadcast",
SimpleFmodModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(1, 2).random_(1, 5),
),
lambda: (
"positive_broadcast",
SimpleFmodModule(),
torch.Tensor(4, 2).random_(0, 5),
torch.Tensor(8, 3, 4, 2).random_(1, 5),
),
]
)
def test_fmod(self, _, module, a, b):
utils.compare_tracing_methods(module, a, b, fusible_ops={"aten::fmod"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleFloorModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.floor(c)
class TestFloor(utils.TorchGlowTestCase):
def test_floor(self):
"""Basic test of the PyTorch floor Node on Glow."""
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
utils.compare_tracing_methods(
SimpleFloorModule(), x, y, fusible_ops={"aten::floor"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseOrModule(torch.nn.Module):
def __init__(self, dtype=None):
super(SimpleBitwiseOrModule, self).__init__()
self.dtype = dtype
def forward(self, a, b):
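        # bitwise_or is elementwise on integer/bool tensors and broadcasts
        # like any other binary op.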
c = torch.bitwise_or(a, b)
d = torch.bitwise_or(a, c)
return d
class TestBitwiseOr(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([0x01, 0x03, 0xFFFFFFF0, 0x5], dtype=torch.int32),
torch.tensor([0x02, 0x03, 0x2, 0x1F], dtype=torch.int32),
),
lambda: (
"basic_bool",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((0x1, 0x04, 0x1), dtype=torch.int32),
torch.ones((0x2, 0x1, 0x4), dtype=torch.int32),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.int32),
torch.ones((4, 5), dtype=torch.int32),
),
]
)
    def test_bitwise_or(self, _, a, b, skip_to_glow=False):
        """Tests of the PyTorch Bitwise Or Node on Glow."""
        utils.compare_tracing_methods(
            SimpleBitwiseOrModule(),
            a,
            b,
            fusible_ops={"aten::bitwise_or"},
            skip_to_glow=skip_to_glow,
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleLogModule(torch.nn.Module):
    def __init__(self):
        super(SimpleLogModule, self).__init__()

    def forward(self, a):
        b = torch.log(a)
        return torch.log(b)
class TestLog(utils.TorchGlowTestCase):
def test_log_basic(self):
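        # 1 / rand() yields inputs greater than 1, so both chained logs
        # stay finite and real.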
x = 1 / torch.rand(3, 4, 5)
utils.compare_tracing_methods(
SimpleLogModule(),
x,
fusible_ops={"aten::log"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import torch
from tests import utils
class SimplePixelUnshuffleModel(torch.nn.Module):
def __init__(self, downscale_factor):
super(SimplePixelUnshuffleModel, self).__init__()
self.downscale_factor = downscale_factor
self.ps = torch.nn.PixelUnshuffle(self.downscale_factor)
def forward(self, tensor):
return self.ps(tensor)
class TestPixelUnshuffle(utils.TorchGlowTestCase):
def test_pixel_unshuffle(self):
"""Test of the PyTorch pixel_unshuffle Node on Glow."""
for _ in range(0, 20):
c = random.randint(1, 3)
r = random.randint(2, 5)
w = random.randint(1, 100)
h = random.randint(1, 100)
b = random.randint(1, 10)
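            # PixelUnshuffle(r) maps (b, c, H*r, W*r) -> (b, c*r*r, H, W),
            # so both spatial dims must be multiples of the downscale factor.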
utils.compare_tracing_methods(
SimplePixelUnshuffleModel(r),
torch.randn(b, c, w * r, h * r),
fusible_ops={"aten::pixel_unshuffle"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool2dModule(torch.nn.Module):
def __init__(self, kernel_size, stride=None, padding=0):
super(SimpleAvgPool2dModule, self).__init__()
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
def forward(self, inputs):
return F.avg_pool2d(
inputs + inputs, self.kernel_size, padding=self.padding, stride=self.stride
)
class TestAvgPool2d(utils.TorchGlowTestCase):
def test_avg_pool2d_basic(self):
"""Basic test of the PyTorch avg_pool2d Node on Glow."""
inputs = torch.randn(1, 4, 5, 5)
utils.run_comparison_tests(
SimpleAvgPool2dModule(2),
inputs,
fusible_ops={"aten::avg_pool2d"},
)
def test_avg_pool2d_with_args(self):
"""Test of the PyTorch avg_pool2d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10, 10)
utils.run_comparison_tests(
SimpleAvgPool2dModule(3, stride=7),
inputs,
fusible_ops={"aten::avg_pool2d"},
fp16vfp16_atol=1e-3,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleAvgPool3dModule(torch.nn.Module):
    def __init__(self, kernel_size, stride=None, padding=0):
        super(SimpleAvgPool3dModule, self).__init__()
        self.kernel_size = kernel_size
        self.padding = padding
        # Store the stride so a caller-provided value is actually honored.
        self.stride = stride

    def forward(self, inputs):
        return F.avg_pool3d(
            inputs, self.kernel_size, stride=self.stride, padding=self.padding
        )
class TestAvgPool3d(utils.TorchGlowTestCase):
def test_avg_pool3d_basic(self):
"""Basic test of the PyTorch avg_pool3d Node on Glow."""
inputs = torch.randn(1, 4, 5, 5, 5)
utils.run_comparison_tests(
SimpleAvgPool3dModule(3), inputs, fusible_ops={"aten::avg_pool3d"}
)
def test_avg_pool3d_with_args(self):
"""Test of the PyTorch avg_pool3d Node with arguments on Glow."""
inputs = torch.randn(1, 4, 10, 10, 10)
utils.run_comparison_tests(
SimpleAvgPool3dModule(3, (4, 7, 7)),
inputs,
fusible_ops={"aten::avg_pool3d"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedLeakyReluModel(torch.nn.Module):
def __init__(self, scale, zero_point, dtype):
super(SimpleQuantizedLeakyReluModel, self).__init__()
self.scale = scale
self.zero_point = zero_point
self.dtype = dtype
def forward(self, tensor):
quantize = torch.nn.quantized.Quantize(
scale=self.scale, zero_point=self.zero_point, dtype=self.dtype
)
dequantize = torch.nn.quantized.DeQuantize()
leaky_relu = torch.nn.LeakyReLU()
return dequantize(leaky_relu(quantize(tensor)))
class TestQuantizedLeakyRelu(utils.TorchGlowTestCase):
def test_quantized_leaky_relu(self):
"""Basic test of the PyTorch quantized::leaky_relu Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedLeakyReluModel(0.3, 0, torch.quint8),
torch.randn([5, 5]),
fusible_ops={
"aten::leaky_relu",
"aten::quantize_per_tensor",
"aten::dequantize",
},
)
def test_quantized_leaky_relu_cut_dq(self):
"""Basic test of the PyTorch quantized::leaky_relu Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedLeakyReluModel(0.3, 0, torch.quint8),
torch.randn([5, 5]),
fusible_ops={"aten::leaky_relu", "aten::quantize_per_tensor"},
fusion_blocklist=["aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleCatModule(torch.nn.Module):
def __init__(self, *dimensions):
super(SimpleCatModule, self).__init__()
self.dimensions = dimensions
def forward(self, a, b, c):
other = torch.cat((a, b, c), self.dimensions[0])
for dimension in self.dimensions[1:]:
other = torch.cat((other, other), dimension)
return other
class TestCat(utils.TorchGlowTestCase):
def test_cat_with_empty_tensor(self):
"""Basic test of the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.empty(0),
torch.randn(2, 3, 4, 5),
torch.randn(2, 3, 4, 5),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_basic(self):
"""Basic test of the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_neg_dim(self):
"""Test negative dimension index for the PyTorch cat Node on Glow."""
utils.compare_tracing_methods(
SimpleCatModule(-3, -2, -1),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_oob_neg_dim(self):
"""Test out of bounds negative dimension index for the PyTorch cat Node on Glow."""
with self.assertRaises(IndexError):
utils.compare_tracing_methods(
SimpleCatModule(-4, -2, -1),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
fusible_ops={"prim::FusedConcat"},
)
def test_cat_with_different_types(self):
"""Test cat between different types that can be cast, which is supported in pytorch."""
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4, dtype=torch.half),
torch.randn(2, 3, 4, dtype=torch.half),
fusible_ops={"prim::FusedConcat"},
)
utils.compare_tracing_methods(
SimpleCatModule(0, 1, 2),
torch.randn(2, 3, 4).to(torch.int),
torch.randn(2, 3, 4).to(torch.long),
torch.randn(2, 3, 4).to(torch.long),
fusible_ops={"prim::FusedConcat"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleStackModel(torch.nn.Module):
def __init__(self, dim):
super(SimpleStackModel, self).__init__()
self.dim = dim
def forward(self, a, b):
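        # torch.stack inserts a brand-new dimension at `dim`, so valid
        # values range over [0, a.dim()] inclusive.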
c = b + b
return torch.stack((a, c), dim=self.dim)
class TestStack(utils.TorchGlowTestCase):
def test_stack_basic(self):
"""Basic test of the PyTorch aten::stack Node on Glow."""
for d in range(0, 4):
utils.compare_tracing_methods(
SimpleStackModel(d),
torch.randn(2, 3, 4),
torch.randn(2, 3, 4),
skip_to_glow=True,
)
def test_stack_different_types(self):
"""Test stack between fp16 and fp32, which is supported in pytorch."""
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4, dtype=torch.half)
for d in range(0, 4):
utils.compare_tracing_methods(
SimpleStackModel(d),
x,
y,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleConvTranspose2dModule(torch.nn.Module):
def __init__(self, stride=1, padding=0, output_padding=0, dilation=1, groups=1):
super(SimpleConvTranspose2dModule, self).__init__()
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.groups = groups
self.dilation = dilation
def forward(self, inputs, filters, bias=None):
convTranspose = F.conv_transpose2d(
inputs,
filters,
bias=bias,
stride=self.stride,
padding=self.padding,
output_padding=self.output_padding,
groups=self.groups,
dilation=self.dilation,
)
return F.relu(convTranspose)
class TestConvTranspose2d(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleConvTranspose2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(4, 8, 3, 3),
),
lambda: (
"with_bias",
SimpleConvTranspose2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(4, 8, 3, 3),
torch.randn(4),
),
]
)
    def test_convTranspose2d(self, _, module, inputs, filters, bias=None):
        """Basic test of the PyTorch conv_transpose2d Node on Glow."""
        # Forward the bias when a case provides one so that "with_bias"
        # actually exercises the bias path.
        args = (inputs, filters) if bias is None else (inputs, filters, bias)
        utils.compare_tracing_methods(
            module, *args, fusible_ops={"aten::_convolution"}
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleReluModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleReluModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
other = F.relu(tensor, inplace=self.inplace)
return F.relu(other, inplace=self.inplace)
class TestRelu(utils.TorchGlowTestCase):
def test_relu_basic(self):
"""Basic test of the PyTorch relu Node on Glow."""
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
utils.compare_tracing_methods(SimpleReluModel(), x, fusible_ops={"aten::relu"})
def test_relu_inplace(self):
"""Test of the PyTorch relu_ Node on Glow."""
x = torch.randn(4)
# make sure we have at least one negative
x[0] = -2.0
utils.compare_tracing_methods(
SimpleReluModel(inplace=True), x, fusible_ops={"aten::relu_"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleArgSortModule(torch.nn.Module):
def __init__(self, descending=True):
super(SimpleArgSortModule, self).__init__()
self.descending = descending
def forward(self, inputs):
# Only last dim is currently supported
return torch.argsort(inputs, dim=-1, descending=self.descending)
class TestArgSort(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"desc",
SimpleArgSortModule(),
torch.randn(4),
),
lambda: (
"asc",
SimpleArgSortModule(descending=False),
torch.randn(4),
),
lambda: (
"2d_desc",
SimpleArgSortModule(),
torch.randn(4, 3),
),
lambda: (
"3d_asc",
SimpleArgSortModule(descending=False),
torch.randn(6, 4, 5),
),
lambda: (
"4d_desc",
SimpleArgSortModule(),
torch.randn(4, 7, 7, 3),
),
]
)
def test_argsort(self, _, module, a):
utils.compare_tracing_methods(module, a, fusible_ops={"aten::argsort"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleViewModule(torch.nn.Module):
def __init__(self, *shape):
super(SimpleViewModule, self).__init__()
self.shape = shape
def forward(self, tensor):
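        # A single -1 in the target shape lets view() infer that dimension
        # from the total element count.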
return (tensor + tensor).view(self.shape)
class TestView(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (SimpleViewModule(2, -1), torch.rand(2, 3, 4)),
lambda: (SimpleViewModule(-1, 2), torch.rand(2, 3, 4)),
]
)
def test_simple(self, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::view"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleReshapeModel(torch.nn.Module):
def __init__(self, shape):
super(SimpleReshapeModel, self).__init__()
self.shape = shape
def forward(self, tensor):
combined = tensor + tensor
return combined.reshape(self.shape)
class TestReshape(utils.TorchGlowTestCase):
def test_reshape(self):
"""Test of the PyTorch reshape Node on Glow."""
utils.compare_tracing_methods(
SimpleReshapeModel([2, -1]),
torch.rand(2, 3, 4),
fusible_ops={"aten::reshape"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseXorModule(torch.nn.Module):
def __init__(self, dtype=None):
super(SimpleBitwiseXorModule, self).__init__()
self.dtype = dtype
def forward(self, a, b):
c = torch.bitwise_xor(a, b)
d = torch.bitwise_xor(a, c)
return d
class TestBitwiseXor(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([0x01, 0x02, 0x03, 0x04], dtype=torch.int32),
torch.tensor([0x03, 0x07, 0xFF, 0x0100], dtype=torch.int32),
),
lambda: (
"basic_64",
torch.tensor([0x01, 0x03, 0xFFFFFFFFFF00, 0x5], dtype=torch.int64),
torch.tensor([0x02, 0x03, 0x2, 0x1F], dtype=torch.int64),
),
lambda: (
"basic_bool",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((0x1, 0x3, 0x6), dtype=torch.int32),
torch.ones((0x3, 0x1, 0x6), dtype=torch.int32),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.int32),
torch.ones((4, 5), dtype=torch.int32),
),
]
)
    def test_bitwise_xor(self, _, a, b, skip_to_glow=False):
        """Tests of the PyTorch Bitwise Xor Node on Glow."""
        utils.compare_tracing_methods(
            SimpleBitwiseXorModule(),
            a,
            b,
            fusible_ops={"aten::bitwise_xor"},
            skip_to_glow=skip_to_glow,
        )
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleLayerNormModule(torch.nn.Module):
def __init__(self, normalized_shape):
super(SimpleLayerNormModule, self).__init__()
self.normalized_shape = normalized_shape
def forward(self, input, weight=None, bias=None):
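        # layer_norm normalizes over the trailing dims given by
        # normalized_shape; weight and bias, when provided, must match it.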
return F.layer_norm(input, self.normalized_shape, weight, bias)
class OneLayerModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.normalized_shape = (6,)
self.weight = torch.Tensor([2.4180, 2.2070, 2.3184, 0.7378, 0.7734, 0.7520])
self.bias = torch.Tensor([0.1567, 0.0308, 0.0166, 0.2944, 0.2759, 0.5649])
self.eps = 1e-5
def forward(self, tensor):
return F.layer_norm(
tensor, self.normalized_shape, self.weight, self.bias, self.eps
)
class LayerNormNHCWLayout(torch.nn.Module):
def __init__(self, normalized_shape, stride=1, padding=0, dilation=1, groups=1):
super(LayerNormNHCWLayout, self).__init__()
self.normalized_shape = normalized_shape
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, inputs, filters, biasConv=None, weight=None, bias=None):
conv = F.conv2d(
inputs,
filters,
bias=biasConv,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
return F.layer_norm(
conv.permute(0, 2, 1, 3), self.normalized_shape, weight, bias
)
class LayerNormNHCWLayoutWithConvAfter(torch.nn.Module):
def __init__(self, normalized_shape, stride=1, padding=0, dilation=1, groups=1):
super(LayerNormNHCWLayoutWithConvAfter, self).__init__()
self.normalized_shape = normalized_shape
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(
self,
inputs,
filters,
filters2,
biasConv=None,
biasConv2=None,
weight=None,
bias=None,
):
conv = F.conv2d(
inputs,
filters,
bias=biasConv,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
t = F.layer_norm(conv.permute(0, 2, 1, 3), self.normalized_shape, weight, bias)
return F.conv2d(
t.permute(0, 2, 1, 3),
filters2,
biasConv2,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
class TestLayerNorm(utils.TorchGlowTestCase):
def test_layernorm_basic(self):
"""Basic test of the PyTorch layernorm Node on Glow."""
inputs = torch.randn(1, 4, 5, 5)
weight = torch.randn(5)
bias = torch.randn(5)
utils.compare_tracing_methods(
SimpleLayerNormModule([5]),
inputs,
weight,
bias,
fusible_ops={"aten::layer_norm"},
)
def test_layernorm_no_bias(self):
"""Test of the PyTorch aten::layer_norm without weights and bias."""
inputs = torch.randn(1, 4, 5, 5)
utils.compare_tracing_methods(
SimpleLayerNormModule([5, 5]), inputs, fusible_ops={"aten::layer_norm"}
)
def test_layernorm_layout(self):
"""Test of the PyTorch aten::layer_norm with NHCW layout."""
inputs = torch.randn(1, 6, 5, 6)
kernel = torch.randn(3, 6, 2, 2)
# This unit test builds a graph like conv => permute => layer_norm.
# Since Glow always assumes a 4-dim input tensor to be NCHW, the layout
# of layer_norm's input after the permutation would be NHCW, which is
# not a supported layout; we mitigate this by setting accept_all_layouts
# to true.
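# For example, permuting an NCHW tensor of shape (1, 3, 4, 6) with
# dims (0, 2, 1, 3) swaps C and H and yields shape (1, 4, 3, 6), i.e. NHCW.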
utils.compare_tracing_methods(
LayerNormNHCWLayout([5]),
inputs,
kernel,
fusible_ops={"aten::layer_norm", "aten::permute", "aten::_convolution"},
accept_all_layouts=True,
)
def test_layernorm_layout_with_conv_after(self):
"""Test of the PyTorch aten::layer_norm with NHCW layout and conv after
layer_norm."""
inputs = torch.randn(1, 8, 5, 6)
kernel = torch.randn(4, 8, 2, 2)
kernel2 = torch.randn(2, 4, 2, 2)
# This unit test builds a graph like conv => permute => layer_norm
# => conv. Since Glow always assumes a 4-dim input tensor to be NCHW,
# the layout of layer_norm's input after the permutation would be NHCW.
# Even if we simply ignored the layout checking of layer_norm, the
# second conv would still complain about a layout mismatch, so we
# mitigate this by setting accept_all_layouts to true.
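# Note that the second permute(0, 2, 1, 3) swaps C and H back, so the
# trailing conv receives NCHW data again.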
utils.compare_tracing_methods(
LayerNormNHCWLayoutWithConvAfter([5]),
inputs,
kernel,
kernel2,
fusible_ops={"aten::layer_norm", "aten::permute", "aten::_convolution"},
accept_all_layouts=True,
)
def test_layernorm_output(self):
"""Test with mock inputs to verify that output is as expected"""
module = OneLayerModule()
module_inputs = (torch.tensor([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]),)
loc_out = module(*module_inputs)
expected_output = torch.tensor(
[
[
-3.382883310317993,
-1.907626986503601,
-0.662156879901886,
0.5104053020477295,
0.9551836252212524,
1.6657130718231201,
]
]
)
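# Reference: over the trailing dimension of size 6, F.layer_norm computes
# y = (x - mean(x)) / sqrt(var(x, unbiased=False) + eps) * weight + bias.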
tolerance = 1e-04
self.assertTrue(torch.norm(loc_out - expected_output) < tolerance)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseAndModule(torch.nn.Module):
def __init__(self, dtype=None):
super(SimpleBitwiseAndModule, self).__init__()
self.dtype = dtype
def forward(self, a, b):
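# a & (a & b) == a & b, so the chained op keeps a second fusible
# aten::bitwise_and node without changing the result.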
c = torch.bitwise_and(a, b)
d = torch.bitwise_and(a, c)
return d
class TestBitwiseAnd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.tensor([0x01, 0x02, 0x03, 0x04], dtype=torch.int32),
torch.tensor([0x03, 0x07, 0xFF, 0x0100], dtype=torch.int32),
),
lambda: (
"basic_64",
torch.tensor([0x01, 0x03, 0xFFFFFFFFFF00, 0x5], dtype=torch.int64),
torch.tensor([0x02, 0x03, 0x2, 0x1F], dtype=torch.int64),
),
lambda: (
"basic_bool",
torch.tensor([True, True, False, False], dtype=torch.bool),
torch.tensor([True, False, True, False], dtype=torch.bool),
),
lambda: (
"basic_3d",
torch.zeros((0x2, 0x1, 0x5), dtype=torch.int32),
torch.ones((0x1, 0x6, 0x5), dtype=torch.int32),
),
lambda: (
"broadcast_3d",
torch.zeros((3, 4, 5), dtype=torch.int32),
torch.ones((4, 5), dtype=torch.int32),
),
]
)
def test_bitwise_and(self, _, a, b, skip_to_glow=False):
"""Tests of the PyTorch Bitwise And Node on Glow."""
utils.compare_tracing_methods(
SimpleBitwiseAndModule(),
a,
b,
fusible_ops={"aten::bitwise_and"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleChunkModel(torch.nn.Module):
def __init__(self, chunks, dimension):
super(SimpleChunkModel, self).__init__()
self.chunks = chunks
self.dimension = dimension
def forward(self, input):
return torch.chunk(input + input, self.chunks, self.dimension)
class TestConstantChunk(utils.TorchGlowTestCase):
def test_constant_chunk_basic(self):
"""Test of prim::ConstantChunk node on glow"""
x = torch.rand((10, 11))
# shapes: [(10,4), (10,4), (10,3)]
utils.compare_tracing_methods(
SimpleChunkModel(3, 1),
x,
fusible_ops={"prim::ConstantChunk"},
skip_to_glow=True,
)
def test_constant_chunk_negative_indices(self):
"""Test of prim::ConstantChunk node on glow"""
x = torch.rand((10, 11))
# shapes: [(4,11), (4,11), (2,11)]
utils.compare_tracing_methods(
SimpleChunkModel(3, -2),
x,
fusible_ops={"prim::ConstantChunk"},
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import torch
import torch.nn.functional as F
from tests import utils
class SimpleConv3dModule(torch.nn.Module):
def __init__(self, stride=1, padding=0, dilation=1, groups=1):
super(SimpleConv3dModule, self).__init__()
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, inputs, filters, bias=None):
conv = F.conv3d(
inputs,
filters,
bias=bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
return F.relu(conv)
class TestConv3d(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleConv3dModule(padding=1),
torch.randn(1, 4, 5, 5, 3),
torch.randn(8, 4, 3, 3, 3),
),
lambda: (
"with_bias",
SimpleConv3dModule(padding=1),
torch.randn(1, 4, 5, 5, 3),
torch.randn(8, 4, 3, 3, 3),
torch.randn(8),
),
]
)
def test_conv3d(self, _, module, inputs, filters, bias=None):
"""Basic test of the PyTorch conv3d Node on Glow."""
# Forward the bias tensor when a test case provides one, so that the
# "with_bias" case actually exercises a biased convolution.
args = (inputs, filters) if bias is None else (inputs, filters, bias)
utils.compare_tracing_methods(
module, *args, fusible_ops={"aten::_convolution"}
)
def test_conv3d_param_sweep(self):
"""
Test of the PyTorch conv3d Node sweeping through various parameters of the
Node to test that they work correctly.
"""
thwOpts = [3, 4]
padOpts = [0, 1]
groupsOpts = [1, 2]
strideOpts = [1, 2]
Setting = namedtuple("Setting", ["t", "h", "w", "p", "g", "s"])
settings = [
Setting(t=t, h=h, w=w, p=p, g=g, s=s)
for t in thwOpts
for h in thwOpts
for w in thwOpts
for p in padOpts
for g in groupsOpts
for s in strideOpts
]
for setting in settings:
inputs = torch.randn(2, 4, setting.t, setting.h, setting.w)
filters = torch.randn(8, 4 // setting.g, 3, 3, 3)
utils.compare_tracing_methods(
SimpleConv3dModule(
padding=setting.p, stride=setting.s, groups=setting.g
),
inputs,
filters,
fusible_ops={"aten::_convolution"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import torch
import torch.nn.functional as F
from tests import utils
class SimpleConv2dModule(torch.nn.Module):
def __init__(self, stride=1, padding=0, dilation=1, groups=1):
super(SimpleConv2dModule, self).__init__()
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, inputs, filters, bias=None):
conv = F.conv2d(
inputs,
filters,
bias=bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
return F.relu(conv)
class TestConv2d(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleConv2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(8, 4, 3, 3),
),
lambda: (
"with_bias",
SimpleConv2dModule(padding=1),
torch.randn(1, 4, 5, 5),
torch.randn(8, 4, 3, 3),
torch.randn(8),
),
lambda: (
"nonsquare_dilation",
SimpleConv2dModule(padding=1, dilation=[1, 2]),
torch.randn(1, 4, 5, 5),
torch.randn(8, 4, 3, 3),
),
lambda: (
"different_padding",
SimpleConv2dModule(padding=[1, 0]),
torch.randn(1, 4, 5, 5),
torch.randn(8, 4, 3, 3),
),
]
)
def test_conv2d(self, _, module, inputs, filters, bias=None):
"""Basic test of the PyTorch conv2d Node on Glow."""
# Forward the bias tensor when a test case provides one.
args = (inputs, filters) if bias is None else (inputs, filters, bias)
utils.compare_tracing_methods(
module, *args, fusible_ops={"aten::_convolution"}
)
def test_conv2d_param_sweep(self):
"""
Test of the PyTorch conv2d Node sweeping through various parameters of the
Node to test that they work correctly.
"""
hwOpts = [3, 4]
padOpts = [0, 1]
groupsOpts = [1, 2]
dilationOpts = [1, 2]
strideOpts = [1, 2]
Setting = namedtuple("Setting", ["h", "w", "p", "g", "d", "s"])
settings = [
Setting(h=h, w=w, p=p, g=g, d=d, s=s)
for h in hwOpts
for w in hwOpts
for p in padOpts
for g in groupsOpts
for d in dilationOpts
for s in strideOpts
]
for setting in settings:
class Conv2dTestModule(torch.nn.Module):
def forward(self, inputs, filters):
# Stride is forwarded from the sweep; dilation is not, since a
# dilated 3x3 kernel would not fit the unpadded 3x3/4x4 inputs
# used here.
conv = F.conv2d(
inputs, filters, padding=setting.p, stride=setting.s, groups=setting.g
)
return F.relu(conv)
inputs = torch.randn(2, 4, setting.h, setting.w)
filters = torch.randn(8, 4 // setting.g, 3, 3)
utils.compare_tracing_methods(
Conv2dTestModule(), inputs, filters, fusible_ops={"aten::_convolution"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMulModule(torch.nn.Module):
def __init__(self):
super(SimpleMulModule, self).__init__()
def forward(self, left, right):
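# A zero-dim right tensor (size torch.Size([])) is unwrapped via .item()
# so the scalar overload of aten::mul is exercised too.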
other = left.mul(right.item() if right.size() == torch.Size([]) else right)
return other.mul(other)
class TestMul(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", torch.randn(4), torch.randn(4)),
lambda: ("broadcast", torch.randn(8, 3, 4, 2), torch.randn(4, 2)),
lambda: ("broadcast", torch.randn(8, 3, 4, 2), torch.randn(1, 2)),
lambda: ("broadcast", torch.randn(4, 2), torch.randn(8, 3, 4, 2)),
lambda: ("float", torch.randn(4, 2), torch.tensor(3.2)),
lambda: ("int", torch.randn(4, 2), torch.tensor(22)),
lambda: (
"int64",
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
torch.randint(-10, 10, (2, 4), dtype=torch.int64),
),
]
)
def test_mul(self, _, left, right, skip_to_glow=False):
"""Basic test of the PyTorch mul Node on Glow."""
utils.run_comparison_tests(
SimpleMulModule(),
(left, right),
fusible_ops={"aten::mul"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
def rand_rois(N, H, W, count):
rois = torch.rand((count, 5))
for i in range(count):
rois[i][0] = (N * rois[i][0]) // 1 # batch index
rois[i][1] *= W - 1 # x1
rois[i][2] *= H - 1 # y1
f = rois[i][3]
if f == 0: # enforce 0 < f < 1
f = 1e-3
rois[i][3] = rois[i][1] + f * (W - rois[i][1]) # x2
f = rois[i][4]
if f == 0: # enforce 0 < f < 1
f = 1e-3
rois[i][4] = rois[i][2] + f * (H - rois[i][2]) # y2
assert rois[i][1] > 0 and rois[i][1] < W - 1
assert rois[i][2] > 0 and rois[i][2] < H - 1
assert rois[i][3] > rois[i][1] and rois[i][3] < W
assert rois[i][4] > rois[i][2] and rois[i][4] < H
return rois
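# Each generated row is [batch_idx, x1, y1, x2, y2] with 0 < x1 < x2 < W and
# 0 < y1 < y2 < H; e.g. for H=16, W=20 a row might be [0.0, 3.2, 5.1, 14.7, 12.9].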
class SimpleRoiAlignModel(torch.nn.Module):
def __init__(
self,
order,
spatial_scale=1.0,
pooled_h=6,
pooled_w=6,
sampling_ratio=2,
aligned=True,
):
super(SimpleRoiAlignModel, self).__init__()
self.kwargs = {
"order": order,
"spatial_scale": spatial_scale,
"pooled_h": pooled_h,
"pooled_w": pooled_w,
"sampling_ratio": sampling_ratio,
"aligned": aligned,
}
def forward(self, features, rois):
return torch.ops._caffe2.RoIAlign(features, rois, **self.kwargs)
class TestRoiAlign(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleRoiAlignModel("NCHW"), torch.randn(1, 3, 16, 20)),
lambda: ("nhwc", SimpleRoiAlignModel("NHWC"), torch.randn(1, 16, 20, 3)),
lambda: ("batched", SimpleRoiAlignModel("NCHW"), torch.randn(4, 3, 16, 20)),
lambda: (
"scaled",
SimpleRoiAlignModel("NCHW", spatial_scale=0.0625),
torch.randn(1, 3, 224, 224),
),
lambda: (
"unaligned",
SimpleRoiAlignModel("NCHW", aligned=False),
torch.randn(1, 3, 16, 20),
),
lambda: (
"dynamic_sampling",
SimpleRoiAlignModel("NCHW", sampling_ratio=0),
torch.randn(1, 3, 16, 20),
),
]
)
def test_roi_align(self, _, module, features):
order = module.kwargs.get("order")
kwargs = {k: v for k, v in zip(order, features.size())}
kwargs.pop("C")
rois = rand_rois(count=250, **kwargs)
utils.compare_tracing_methods(
module, features, rois, fusible_ops={"_caffe2::RoIAlign"}
)
def test_roi_align_fp16(self):
"""Test of the _caffe2::RoiAlign Node on Glow."""
N, C, H, W = 1, 3, 16, 20
features = torch.randn(N, C, H, W)
rois = rand_rois(N, H, W, 250)
# atol/rtol must be high because the maximum delta can be large when fp16
# rounding of the coordinates shifts the sampling points.
utils.compare_tracing_methods(
SimpleRoiAlignModel("NCHW"),
features,
rois,
fusible_ops={"_caffe2::RoIAlign"},
fp16=True,
atol=1e-1,
rtol=1e-1,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSumModule(torch.nn.Module):
def __init__(self, dtype=None):
super(SimpleSumModule, self).__init__()
self.dtype = dtype
def forward(self, a):
b = a + a
return torch.sum(b, dtype=self.dtype)
class KeepdimSumModule(torch.nn.Module):
def __init__(self, axis, keepdim, dtype=None):
super(KeepdimSumModule, self).__init__()
self.axis = axis
self.keepdim = keepdim
self.dtype = dtype
def forward(self, a):
b = a + a
return torch.sum(b, self.axis, keepdim=self.keepdim, dtype=self.dtype)
class TestSumBasic(utils.TorchGlowTestCase):
def test_sum_basic(self):
a = torch.randn(2, 3, 4)
utils.compare_tracing_methods(SimpleSumModule(), a, fusible_ops={"aten::sum"})
class TestSumKeepdim(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("keepdim", KeepdimSumModule(0, True), torch.randn(2, 3, 4)),
lambda: ("axis_1", KeepdimSumModule(1, False), torch.randn(4, 3, 4)),
lambda: (
"axis_2_keepdim_f16",
KeepdimSumModule(2, True, torch.float16),
torch.randn(5, 2, 4),
),
lambda: (
"axis_1_f16",
KeepdimSumModule(1, False, torch.float16),
torch.randn(3, 1, 2),
),
lambda: (
"neg_axis_f16",
KeepdimSumModule(-2, False, torch.float16),
torch.randn(3, 1, 2),
),
lambda: (
"neg_axis_keepdim",
KeepdimSumModule(-2, True),
torch.randn(3, 1, 2),
),
lambda: (
"multiple_axes",
KeepdimSumModule((0, 1), False, torch.float16),
torch.randn(3, 4, 2),
),
lambda: (
"multiple_axes_keep_dim",
KeepdimSumModule((2, 1), True, torch.float16),
torch.randn(3, 4, 2),
),
]
)
def test_sum(self, _, module, a):
utils.compare_tracing_methods(module, a, fusible_ops={"aten::sum"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleSoftmaxModel(torch.nn.Module):
def __init__(self, dimension):
super(SimpleSoftmaxModel, self).__init__()
self.dimension = dimension
def forward(self, tensor):
return F.softmax(tensor, self.dimension)
class TestSoftmax(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (-2, [2, 3]),
lambda: (-1, [2, 3]),
lambda: (0, [2, 3]),
lambda: (1, [2, 3]),
lambda: (-3, [2, 3, 4]),
lambda: (-2, [2, 3, 4]),
lambda: (-1, [2, 3, 4]),
lambda: (0, [2, 3, 4]),
lambda: (1, [2, 3, 4]),
lambda: (2, [2, 3, 4]),
]
)
def test_softmax(self, dim, input_dims):
module = SimpleSoftmaxModel(dim)
input = torch.randn(input_dims)
utils.compare_tracing_methods(module, input)
def test_softmax_oob_neg_dim(self):
"""Test out of bounds negative dimension index for the PyTorch SoftMax Node on Glow."""
with self.assertRaises(IndexError):
utils.compare_tracing_methods(SimpleSoftmaxModel(-3), torch.randn(2, 3))
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleLogSoftmaxModel(torch.nn.Module):
def __init__(self, dimension):
super(SimpleLogSoftmaxModel, self).__init__()
self.dimension = dimension
def forward(self, tensor):
return F.log_softmax(tensor, self.dimension)
class TestLogSoftmax(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (-2, [2, 3]),
lambda: (-1, [2, 3]),
lambda: (0, [2, 3]),
lambda: (1, [2, 3]),
lambda: (-3, [2, 3, 4]),
lambda: (-2, [2, 3, 4]),
lambda: (-1, [2, 3, 4]),
lambda: (0, [2, 3, 4]),
lambda: (1, [2, 3, 4]),
lambda: (2, [2, 3, 4]),
]
)
def test_log_softmax(self, dim, input_dims):
module = SimpleLogSoftmaxModel(dim)
input = torch.randn(input_dims)
utils.compare_tracing_methods(module, input, fusible_ops={"aten::log_softmax"})
def test_log_softmax_oob_neg_dim(self):
"""Test out of bounds negative dimension index for the PyTorch LogSoftMax Node on Glow."""
with self.assertRaises(IndexError):
utils.compare_tracing_methods(
SimpleLogSoftmaxModel(-3),
torch.randn(2, 3),
fusible_ops={"aten::log_softmax"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMaxModule(torch.nn.Module):
def __init__(self):
super(SimpleMaxModule, self).__init__()
def forward(self, a, b):
return torch.max(a + a, b + b)
class UnaryMaxModule(torch.nn.Module):
def __init__(self):
super(UnaryMaxModule, self).__init__()
def forward(self, a):
return torch.max(a + a)
class ReduceMaxModule(torch.nn.Module):
def __init__(self, dim, keep_dim):
super(ReduceMaxModule, self).__init__()
self.dim = dim
self.keep_dim = keep_dim
def forward(self, a):
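# torch.max along a dim returns (values, indices); stacking them checks
# both outputs at once (the int64 indices are promoted to the values' dtype).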
values, index = torch.max(a + a, self.dim, self.keep_dim)
return torch.stack((values, index))
class TestMax(utils.TorchGlowTestCase):
def test_elementwise_max(self):
"""Test of the PyTorch max Node on Glow."""
utils.compare_tracing_methods(
SimpleMaxModule(), torch.randn(4), torch.randn(4), fusible_ops={"aten::max"}
)
def test_elementwise_max_broadcast(self):
"""Test of the PyTorch max Node with broadcast on Glow."""
utils.compare_tracing_methods(
SimpleMaxModule(),
torch.randn(2, 4),
torch.randn(4),
fusible_ops={"aten::max"},
)
def test_unary_max(self):
"""Test of the PyTorch unary max Node on Glow."""
utils.compare_tracing_methods(
UnaryMaxModule(),
torch.randint(
50,
(
10,
10,
),
dtype=torch.int,
),
fusible_ops={"aten::max"},
)
def test_reduce_max(self):
"""Test of the PyTorch max Node reducing on a specified dim."""
utils.compare_tracing_methods(
ReduceMaxModule(2, False),
torch.randn(3, 4, 5),
fusible_ops={"aten::max"},
)
def test_reduce_max_keep_dim(self):
"""Test of the PyTorch max Node reducing on a specified dim and keeping dim."""
utils.compare_tracing_methods(
ReduceMaxModule(2, True),
torch.randn(3, 4, 5),
fusible_ops={"aten::max"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import torch
from tests import utils
logger = logging.getLogger("quantized conv3d test")
logger.setLevel(logging.INFO)
class UnpackedConv3dModel(torch.nn.Module):
def __init__(self, input_quantization, weight_quantization):
super(UnpackedConv3dModel, self).__init__()
self.input_quantization = input_quantization
self.weight_quantization = weight_quantization
def forward(self, tensor, weight, bias):
return torch.nn.quantized.DeQuantize()(
torch.nn.quantized.functional.conv3d(
self.input_quantization(tensor), self.weight_quantization(weight), bias
)
)
class PackedConv3dModel(torch.nn.Sequential):
def __init__(self, quantization, convolution, dequantization, weight, bias):
# Due to off-by-one rounding differences, we cannot let the weights,
# bias, and input be totally random.
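# (Deterministic fills keep results away from rounding boundaries, where
# PyTorch and Glow may round in different directions.)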
convolution.weight.data.fill_(weight)
convolution.bias.data.fill_(bias)
super(PackedConv3dModel, self).__init__(
quantization, convolution, dequantization
)
self.eval()
self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(self, inplace=True)
torch.ao.quantization.convert(self, inplace=True)
class TestQuantizedConv3d(utils.TorchGlowTestCase):
@unittest.skip(reason="Requires freezing")
def test_quantized_conv3d_unpacked(self):
"""Basic test of the PyTorch quantize::conv3d Node with unpacked weights on Glow."""
def test_f(a, w, b):
qu = torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8)
qi = torch.nn.quantized.Quantize(1 / 16, 0, torch.qint8)
dq = torch.nn.quantized.DeQuantize()
conv = torch.nn.quantized.functional.conv3d
return dq(conv(qu(a), qi(w), b))
# TODO: due to quantization error differences between PyTorch and
# Glow, we use deterministic test data here instead of random inputs
# such as:
# x = torch.randn([3, 3, 3, 3])
# w = torch.randn([3, 3, 3, 3])
# b = torch.randn([3])
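# With scale = 1/16 and zero_point = 0, quantization computes
# q = clamp(round(x / scale) + zero_point, qmin, qmax), so x = 5.0 maps to
# q = 80 and dequantizes exactly back to 80 * (1 / 16) = 5.0.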
x = torch.tensor([[[[[5.0, 6.0], [7.0, 8.0]]]]])
w = torch.tensor([[[[[2.0]]]]])
b_zero = torch.zeros(1)
b = torch.randn(1)
utils.compare_tracing_methods(
UnpackedConv3dModel(
torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8),
torch.nn.quantized.Quantize(1 / 16, 0, torch.qint8),
),
x,
w,
b,
fusible_ops={
"aten::quantize_per_tensor",
"glow::unpacked_quantized_conv3d",
"aten::dequantize",
},
skip_to_glow=True,
)
utils.compare_tracing_methods(
UnpackedConv3dModel(
torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8),
torch.nn.quantized.Quantize(1 / 16, 0, torch.qint8),
),
x,
w,
b_zero,
fusible_ops={
"aten::quantize_per_tensor",
"glow::unpacked_quantized_conv3d",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_conv3d_packed_groupwise(self):
"""Basic test of PyTorch quantize::conv3d Node with packed weights on Glow."""
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 3, 5, 5])
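# x is now a 1x3x3x5x5 tensor whose innermost rows are all [0, 1, 2, 3, 4],
# a deterministic input for the quantized conv.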
utils.compare_tracing_methods(
PackedConv3dModel(
torch.nn.quantized.Quantize(0.1, 2, torch.quint8),
torch.nn.Conv3d(3, 3, kernel_size=1, stride=(1, 1, 1), groups=3),
torch.nn.quantized.DeQuantize(),
2.0,
1.0,
),
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv3d",
"aten::dequantize",
},
)
def test_quantized_conv3d_packed_cut_q_dq(self):
"""Basic test of PyTorch quantize::conv3d Node with packed weights on Glow, with quantize and dequantize excluded."""
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 3, 5, 5])
utils.compare_tracing_methods(
PackedConv3dModel(
torch.nn.quantized.Quantize(0.1, 2, torch.quint8),
torch.nn.Conv3d(3, 3, kernel_size=1, stride=(1, 1, 1), groups=3),
torch.nn.quantized.DeQuantize(),
2.0,
1.0,
),
x,
fusible_ops={"quantized::conv3d"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
)
def test_quantized_conv3d_packed_channelwise(self):
"""Basic test of PyTorch quantize::conv3d Node with packed channelwise weights on Glow."""
with torch.no_grad():
x = torch.randn([1, 4, 4, 4, 4])
conv = torch.nn.Conv3d(4, 2, 2, (2, 2, 2), groups=1)
conv.weight.random_(-1, 1)
conv.bias.data.random_(-1, 1)
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(x)
torch.ao.quantization.convert(model, inplace=True)
# TODO: accuracy needs to be investigated. Average accuracy is decent,
# but some elements have errors (possibly from rounding differences).
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv3d",
"aten::dequantize",
},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class TestQuantizedConv2d(utils.TorchGlowTestCase):
@unittest.skip(reason="Requires freezing")
def test_quantized_conv2d_unpacked(self):
"""Basic test of the PyTorch quantize::conv2d Node with unpacked weights on Glow."""
class SimpleQuantizedConvModel(torch.nn.Module):
def __init__(self):
super(SimpleQuantizedConvModel, self).__init__()
def forward(self, a, w, b):
qu = torch.nn.quantized.Quantize(1 / 16, 0, torch.quint8)
qi = torch.nn.quantized.Quantize(1 / 16, 0, torch.qint8)
dq = torch.nn.quantized.DeQuantize()
conv = torch.nn.quantized.functional.conv2d
return dq(conv(qu(a), qi(w), b))
# TODO: due to quantization error differences between PyTorch and
# Glow, we use deterministic test data here instead of random inputs
# such as:
# x = torch.randn([3, 3, 3, 3])
# w = torch.randn([3, 3, 3, 3])
# b = torch.randn([3])
x = torch.tensor([[[[5.0, 6.0], [7.0, 8.0]]]])
w = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]])
b_zero = torch.zeros(1)
b = torch.randn(1)
utils.compare_tracing_methods(
SimpleQuantizedConvModel(),
x,
w,
b,
fusible_ops={
"aten::quantize_per_tensor",
"glow::unpacked_quantized_conv2d",
"aten::dequantize",
},
skip_to_glow=True,
)
utils.compare_tracing_methods(
SimpleQuantizedConvModel(),
x,
w,
b_zero,
fusible_ops={
"aten::quantize_per_tensor",
"glow::unpacked_quantized_conv2d",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_conv2d_packed_groupwise(self):
"""Basic test of PyTorch quantize::conv2d Node with packed weights on Glow."""
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(0.1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=3)
dq = torch.nn.quantized.DeQuantize()
# Due to off-by-one rounding differences, we cannot let the weights,
# bias, and input be totally random.
conv.weight.data.fill_(2.0)
conv.bias.data.fill_(1.0)
model = torch.nn.Sequential(q, conv, dq)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
},
)
def test_quantized_conv2d_packed_cut_q_dq(self):
"""Basic test of PyTorch quantize::conv2d Node with packed weights on Glow, with quantize and dequantize excluded."""
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 5, 5])
q = torch.nn.quantized.Quantize(0.1, 2, torch.quint8)
conv = torch.nn.Conv2d(3, 3, [2, 2], groups=3)
dq = torch.nn.quantized.DeQuantize()
# Due to off-by-one rounding differences, we cannot let the weights,
# bias, and input be totally random.
conv.weight.data.fill_(2.0)
conv.bias.data.fill_(1.0)
model = torch.nn.Sequential(q, conv, dq)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={"quantized::conv2d"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
def test_quantized_conv2d_packed_channelwise(self):
"""Basic test of PyTorch quantize::conv2d Node with packed channelwise weights on Glow."""
with torch.no_grad():
x = torch.randn([1, 4, 4, 4])
conv = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
conv.weight.random_(-1, 1)
conv.bias.data.random_(-1, 1)
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(x)
torch.ao.quantization.convert(model, inplace=True)
# TODO: accuracy needs to be investigated. Average accuracy is decent,
# but some elements have errors (possibly from rounding differences).
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
},
)
def test_quantized_conv2d_packed_channelwise_serial_qconv(self):
"""Test of serial structure PyTorch quantized::conv2d on Glow."""
class SerialQuantizedConvModel(torch.nn.Module):
def __init__(self):
super(SerialQuantizedConvModel, self).__init__()
self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = torch.nn.Conv2d(4, 4, [2, 2], groups=1)
self.conv1.weight.random_(-1, 1)
self.conv1.bias.data.random_(-1, 1)
self.conv2 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
self.conv2.weight.random_(-1, 1)
self.conv2.bias.data.random_(-1, 1)
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.dequant(x)
return x
with torch.no_grad():
x = torch.randn([1, 4, 4, 4])
model = SerialQuantizedConvModel()
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(x)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
},
)
def test_quantized_conv2d_packed_channelwise_parallel_qconv(self):
"""Test of parallel structure PyTorch quantized::conv2d on Glow."""
class ParallelQuantizedConvModel(torch.nn.Module):
def __init__(self):
super(ParallelQuantizedConvModel, self).__init__()
self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
self.conv1.weight.random_(-1, 1)
self.conv1.bias.data.random_(-1, 1)
self.conv2 = torch.nn.Conv2d(4, 2, [2, 2], groups=1)
self.conv2.weight.random_(-1, 1)
self.conv2.bias.data.random_(-1, 1)
self.cat = torch.ops.quantized.cat
self.dequant = torch.ao.quantization.DeQuantStub()
self.dequant2 = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x1 = self.conv1(x)
x2 = self.conv2(x)
x1 = self.dequant(x1)
x2 = self.dequant2(x2)
x = torch.cat((x1, x2), dim=0)
return x
with torch.no_grad():
x = torch.randn([1, 4, 4, 4])
model = ParallelQuantizedConvModel()
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(x)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
},
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleUnsqueezeModel(torch.nn.Module):
def __init__(self, dimension, inplace=False):
super(SimpleUnsqueezeModel, self).__init__()
self.dimension = dimension
self.inplace = inplace
def forward(self, tensor):
if self.inplace:
other = tensor + tensor
return other.unsqueeze_(self.dimension)
else:
return torch.unsqueeze(tensor + tensor, self.dimension)
class TestUnsqueeze(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("dim0", SimpleUnsqueezeModel(0), torch.randn(2, 3, 4)),
lambda: ("dim1", SimpleUnsqueezeModel(1), torch.randn(2, 3, 4)),
lambda: ("dim2", SimpleUnsqueezeModel(2), torch.randn(2, 3, 4)),
lambda: ("dim3", SimpleUnsqueezeModel(3), torch.randn(2, 3, 4)),
lambda: ("dim_negative", SimpleUnsqueezeModel(-1), torch.randn(2, 3, 4)),
lambda: (
"inplace",
SimpleUnsqueezeModel(-1, inplace=True),
torch.randn(2, 3, 4),
),
]
)
def test_unsqueeze(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops=["aten::unsqueeze"])
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from tests import utils
def generate_rois(roi_counts, im_dims):
assert len(roi_counts) == len(im_dims)
all_rois = []
for i, num_rois in enumerate(roi_counts):
if num_rois == 0:
continue
# [batch_idx, x1, y1, x2, y2]
rois = np.random.uniform(0, im_dims[i], size=(roi_counts[i], 5)).astype(
np.float32
)
rois[:, 0] = i # batch_idx
# Swap (x1, x2) if x1 > x2
rois[:, 1], rois[:, 3] = (
np.minimum(rois[:, 1], rois[:, 3]),
np.maximum(rois[:, 1], rois[:, 3]),
)
# Swap (y1, y2) if y1 > y2
rois[:, 2], rois[:, 4] = (
np.minimum(rois[:, 2], rois[:, 4]),
np.maximum(rois[:, 2], rois[:, 4]),
)
all_rois.append(rois)
if len(all_rois) > 0:
return np.vstack(all_rois)
return np.empty((0, 5)).astype(np.float32)
def generate_rois_rotated(roi_counts, im_dims):
rois = generate_rois(roi_counts, im_dims)
# [batch_id, ctr_x, ctr_y, w, h, angle]
rotated_rois = np.empty((rois.shape[0], 6)).astype(np.float32)
rotated_rois[:, 0] = rois[:, 0] # batch_id
rotated_rois[:, 1] = (rois[:, 1] + rois[:, 3]) / 2.0 # ctr_x = (x1 + x2) / 2
rotated_rois[:, 2] = (rois[:, 2] + rois[:, 4]) / 2.0 # ctr_y = (y1 + y2) / 2
rotated_rois[:, 3] = rois[:, 3] - rois[:, 1] + 1.0 # w = x2 - x1 + 1
rotated_rois[:, 4] = rois[:, 4] - rois[:, 2] + 1.0 # h = y2 - y1 + 1
rotated_rois[:, 5] = np.random.uniform(-90.0, 90.0) # angle in degrees
return rotated_rois
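# Example: an axis-aligned box [i, 2, 3, 6, 9] maps to
# [i, 4.0, 6.0, 5.0, 7.0, angle], since ctr = ((2 + 6) / 2, (3 + 9) / 2),
# w = 6 - 2 + 1 and h = 9 - 3 + 1.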
def create_bbox_transform_inputs(roi_counts, num_classes, rotated):
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 200, batch_size)
rois = (
generate_rois_rotated(roi_counts, im_dims)
if rotated
else generate_rois(roi_counts, im_dims)
)
box_dim = 5 if rotated else 4
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = max(np.random.random(), 0.1)
return rois, deltas, im_info
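# Each im_info row is [im_height, im_width, im_scale]; height equals width
# here, and one random scale (clamped to >= 0.1) is shared by the batch.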
class TestBBoxTransform(utils.TorchGlowTestCase):
def test_bbox_transform_basic(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=False,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, False
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_legacy_plus_one(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=False,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=True,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, False
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_apply_scale(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=True,
rotated=False,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, False
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_weights(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[10.0, 10.0, 5.0, 5.0],
apply_scale=False,
rotated=False,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, False
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_fp16(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=False,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, False
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
fp16=True,
atol=1,
rtol=1e-01,
)
def test_bbox_transform_rotated_basic(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=True,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([1, 1], 1)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_rotated_angle_bound_on(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=True,
angle_bound_on=True,
angle_bound_lo=-180,
angle_bound_hi=180,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_rotated_legacy_plus_one(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=True,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=True,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_rotated_apply_scale(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=True,
rotated=True,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_rotated_weights(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[10.0, 10.0, 5.0, 5.0],
apply_scale=False,
rotated=True,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
)
def test_bbox_transform_rotated_fp16(self):
"""Test of the _caffe2::BBoxTransform Node on Glow."""
class TestModule(torch.nn.Module):
def forward(self, rois, deltas, im_info):
return torch.ops._caffe2.BBoxTransform(
rois,
deltas,
im_info,
weights=[1.0, 1.0, 1.0, 1.0],
apply_scale=False,
rotated=True,
angle_bound_on=False,
angle_bound_lo=-90,
angle_bound_hi=90,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
roi_counts, num_classes = ([5, 4, 3, 2, 1], 3)
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, True
)
utils.compare_tracing_methods(
TestModule(),
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
fusible_ops={"_caffe2::BBoxTransform"},
fp16=True,
atol=1,
rtol=1e-01,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMeanModule(torch.nn.Module):
def __init__(self, dim=None, keepdim=False):
super(SimpleMeanModule, self).__init__()
self.dim = dim
self.keepdim = keepdim
def forward(self, a, b):
if self.dim is not None:
return torch.mean(a + b, self.dim, keepdim=self.keepdim)
else:
return torch.mean(a + b)
class TestMean(utils.TorchGlowTestCase):
def test_basic(self):
"""Test of the PyTorch mean Node on Glow."""
utils.compare_tracing_methods(
SimpleMeanModule(),
torch.randn(7),
torch.randn(7),
fusible_ops={"aten::mean"},
)
def test_with_dims(self):
"""Test of the PyTorch mean node with dims on Glow."""
utils.compare_tracing_methods(
SimpleMeanModule((1, 2)),
torch.randn([1, 2, 3, 4]),
torch.randn([1, 2, 3, 4]),
fusible_ops={"aten::mean"},
)
def test_with_keepdim(self):
"""Test of the PyTorch mean node with dims and keepdim=True on Glow."""
utils.compare_tracing_methods(
SimpleMeanModule((2, 1), keepdim=True),
torch.randn([1, 2, 3, 4]),
torch.randn([1, 2, 3, 4]),
fusible_ops={"aten::mean"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
def rand_rotated_rois(N, H, W, count, horizontal=False):
W -= 1
H -= 1
rois = torch.rand((count, 6))
for i in range(count):
rois[i][0] = (N * rois[i][0]) // 1 # batch index
rois[i][1] *= W - 1 # center_x
rois[i][2] *= H - 1 # center_y
rois[i][1] += 1
rois[i][2] += 1
rois[i][3] *= W - rois[i][1] # width
rois[i][4] *= H - rois[i][2] # height
rois[i][5] *= 0 if horizontal else 180 # angle in degrees (0 when horizontal)
assert rois[i][1] >= 1 and rois[i][1] < W
assert rois[i][2] >= 1 and rois[i][2] < H
assert rois[i][1] + rois[i][3] <= W
assert rois[i][2] + rois[i][4] <= H
assert rois[i][3] > 0
assert rois[i][4] > 0
return rois
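# Each row is [batch_idx, ctr_x, ctr_y, w, h, angle]; the asserts above
# ensure the axis-aligned extent fits inside the H x W feature map.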
class SimpleRoiAlignRotatedModel(torch.nn.Module):
def __init__(
self,
order,
spatial_scale=1.0,
pooled_h=6,
pooled_w=6,
sampling_ratio=2,
aligned=True,
):
super(SimpleRoiAlignRotatedModel, self).__init__()
self.kwargs = {
"order": order,
"spatial_scale": spatial_scale,
"pooled_h": pooled_h,
"pooled_w": pooled_w,
"sampling_ratio": sampling_ratio,
"aligned": aligned,
}
def forward(self, features, rois):
return torch.ops._caffe2.RoIAlignRotated(features, rois, **self.kwargs)
class TestRoiAlignRotated(utils.TorchGlowTestCase):
"""TODO: Combine with TestRoiAlign"""
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleRoiAlignRotatedModel("NCHW"),
torch.randn(1, 3, 16, 20),
),
lambda: (
"nhwc",
SimpleRoiAlignRotatedModel("NHWC"),
torch.randn(1, 16, 20, 3),
),
lambda: (
"batched",
SimpleRoiAlignRotatedModel("NCHW"),
torch.randn(4, 3, 16, 20),
),
lambda: (
"horizontal",
SimpleRoiAlignRotatedModel("NCHW"),
torch.randn(4, 3, 16, 20),
True,
),
lambda: (
"scaled",
SimpleRoiAlignRotatedModel("NCHW", spatial_scale=0.0625),
torch.randn(1, 3, 224, 224),
),
lambda: (
"unaligned",
SimpleRoiAlignRotatedModel("NCHW", aligned=False),
torch.randn(1, 3, 16, 20),
),
lambda: (
"dynamic_sampling",
SimpleRoiAlignRotatedModel("NCHW", sampling_ratio=0),
torch.randn(1, 3, 16, 20),
),
]
)
def test_roi_align_rotated(self, _, module, features, horizontal=False):
order = module.kwargs.get("order")
kwargs = {k: v for k, v in zip(order, features.size())}
kwargs.pop("C")
rois = rand_rotated_rois(count=250, horizontal=horizontal, **kwargs)
utils.compare_tracing_methods(
module, features, rois, fusible_ops={"_caffe2::RoIAlignRotated"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleXorModule(torch.nn.Module):
def __init__(self):
super(SimpleXorModule, self).__init__()
def forward(self, a, b):
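# logical_xor(c, c) is identically False; the second op just keeps another
# fusible aten::logical_xor node in the graph.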
c = torch.logical_xor(a, b)
return torch.logical_xor(c, c)
class SimpleOrModule(torch.nn.Module):
def __init__(self):
super(SimpleOrModule, self).__init__()
def forward(self, a, b):
c = torch.logical_or(a, b)
return torch.logical_or(c, c)
class SimpleAndModule(torch.nn.Module):
def __init__(self):
super(SimpleAndModule, self).__init__()
def forward(self, a, b):
c = torch.logical_and(a, b)
return torch.logical_and(c, c)
class SimpleNotModule(torch.nn.Module):
def __init__(self):
super(SimpleNotModule, self).__init__()
def forward(self, a):
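# Double negation returns the original values while keeping two fusible
# aten::logical_not nodes in the graph.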
b = torch.logical_not(a)
return torch.logical_not(b)
class TestXor(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_xor(self, _, a, b, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleXorModule(),
a,
b,
fusible_ops={"aten::logical_xor"},
skip_to_glow=skip_to_glow,
)
class TestOr(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_or(self, _, a, b, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleOrModule(),
a,
b,
fusible_ops={"aten::logical_or"},
skip_to_glow=skip_to_glow,
)
class TestAnd(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((3, 4, 5), dtype=torch.bool),
),
lambda: (
"broadcast",
torch.zeros((3, 4, 5), dtype=torch.bool),
torch.ones((4, 5), dtype=torch.bool),
),
]
)
def test_and(self, _, a, b, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleAndModule(),
a,
b,
fusible_ops={"aten::logical_and"},
skip_to_glow=skip_to_glow,
)
class TestNot(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[lambda: ("basic", torch.zeros((3, 4, 5), dtype=torch.bool))]
)
def test_not(self, _, a, skip_to_glow=False):
utils.compare_tracing_methods(
SimpleNotModule(),
a,
fusible_ops={"aten::logical_not"},
skip_to_glow=skip_to_glow,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import torch
from tests import utils
class SimplePixelShuffleModel(torch.nn.Module):
def __init__(self, upscale_factor):
super(SimplePixelShuffleModel, self).__init__()
self.upscale_factor = upscale_factor
self.ps = torch.nn.PixelShuffle(self.upscale_factor)
def forward(self, tensor):
return self.ps(tensor)
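# PixelShuffle rearranges (N, C * r^2, H, W) into (N, C, H * r, W * r); for
# example, a (1, 9, 4, 4) input with upscale_factor r = 3 becomes
# (1, 1, 12, 12). The random shapes generated below satisfy this contract.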
class TestPixelShuffle(utils.TorchGlowTestCase):
def test_pixel_shuffle(self):
"""Test of the PyTorch pixel_shuffle Node on Glow."""
for _ in range(0, 20):
c = random.randint(1, 3)
r = random.randint(2, 5)
w = random.randint(1, 100)
h = random.randint(1, 100)
b = random.randint(1, 10)
utils.compare_tracing_methods(
SimplePixelShuffleModel(r),
torch.randn(b, c * r * r, w, h),
fusible_ops={"aten::pixel_shuffle"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleArangeModule(torch.nn.Module):
def __init__(self, end, start=0, step=1):
super(SimpleArangeModule, self).__init__()
self.start = start
self.end = end
self.step = step
def forward(self, dummy):
start = self.start(dummy) if callable(self.start) else self.start
end = self.end(dummy) if callable(self.end) else self.end
step = self.step(dummy) if callable(self.step) else self.step
return torch.arange(start=start, end=end, step=step)
class TestArange(utils.TorchGlowTestCase):
"""
Tests for torch.arange Glow fusion.
Note that with constant arguments torch.arange is effectively a constant, so
torch.jit will fold it down to that constant. The tests in this class instead
derive the arange arguments from a tensor input, which keeps the op live in
the graph; otherwise there would be nothing to fuse.
"""
@utils.deterministic_expand(
[
lambda: (
"simple",
SimpleArangeModule(end=lambda x: x.size(0)),
torch.randn(10),
),
lambda: (
"all_args",
SimpleArangeModule(start=lambda x: x.size(0), end=30, step=1),
torch.randn(10),
),
lambda: (
"floats",
SimpleArangeModule(start=lambda x: x.size(0), end=30.5, step=0.8),
torch.randn(10),
),
lambda: (
"negative_step",
SimpleArangeModule(
start=lambda x: x.size(0), end=lambda x: x.size(1), step=-1.2
),
torch.randn(10, 2),
),
]
)
def test_arange(self, _, module, dummy):
"""Testing arange with minimum parameters"""
utils.run_comparison_tests(module, dummy, fusible_ops={"aten::arange"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleTopkModel(torch.nn.Module):
def __init__(self, count):
super(SimpleTopkModel, self).__init__()
self.count = count
def forward(self, tensor):
tensor = tensor + tensor
return torch.topk(tensor, self.count)
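# For reference: torch.topk returns a (values, indices) namedtuple sorted in
# descending order, e.g. torch.topk(torch.tensor([1.0, 3.0, 2.0]), 2) yields
# values [3.0, 2.0] and indices [1, 2].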
class TestTopk(utils.TorchGlowTestCase):
def test_topk_basic(self):
"""Test of the PyTorch TopK Node on Glow."""
utils.compare_tracing_methods(
SimpleTopkModel(3), torch.arange(1.0, 6.0), fusible_ops={"aten::topk"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class QuantizedLayerNormModule(torch.nn.Module):
def __init__(self, normalized_shape, scale, zero_point, weight=None, bias=None):
super(QuantizedLayerNormModule, self).__init__()
self.normalized_shape = normalized_shape
self.scale = scale
self.zero_point = zero_point
self.weight = weight
self.bias = bias
self.quant = torch.nn.quantized.Quantize(
scale=0.3, zero_point=128, dtype=torch.quint8
)
def forward(self, x):
x = self.quant(x)
x = torch.ops.quantized.layer_norm(
x,
self.normalized_shape,
weight=self.weight,
bias=self.bias,
eps=1e-05,
output_scale=self.scale,
output_zero_point=self.zero_point,
)
return torch.dequantize(x)
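# For reference: Quantize maps x to round(x / scale) + zero_point (clamped to
# the quint8 range), and quantized::layer_norm re-quantizes its result using
# output_scale / output_zero_point, so the tests below allow a relatively
# loose atol to absorb both rounding steps.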
class TestQuantizedLayerNorm(utils.TorchGlowTestCase):
def test_layernorm_basic(self):
"""Basic test of the PyTorch quantized layernorm Node on Glow."""
inputs = torch.tensor([0.3, 0.6, 0.3]).reshape(1, 1, 3)
weight = torch.tensor([1.0, 1.1, 1.2])
bias = torch.tensor([0.1, 0.1, 0.2])
utils.compare_tracing_methods(
QuantizedLayerNormModule([3], 0.01, 66, weight, bias),
inputs,
fusible_ops={"quantized::layer_norm"},
atol=1e-02,
)
def test_layernorm_no_weight_bias(self):
"""Test of the PyTorch quantized::layer_norm without weights and bias."""
inputs = torch.tensor([0.3, 0.6, 0.9, 0.3]).reshape(1, 1, 2, 2)
utils.compare_tracing_methods(
QuantizedLayerNormModule([2, 2], 0.01, 91),
inputs,
fusible_ops={"quantized::layer_norm"},
atol=1e-2,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleNumToTensorModule(torch.nn.Module):
def __init__(self, make_float=False):
super(SimpleNumToTensorModule, self).__init__()
self.forward = self._float_forward if make_float else self._forward
def _float_forward(self, dummy):
at0 = torch.ops.prim.NumToTensor(dummy.size(0)).to(torch.float)
# A constant Python float is torch.float64 by default, so we convert
# the NumToTensor result to float32 to match at0.
at1 = torch.ops.prim.NumToTensor(1.2).to(torch.float)
return torch.cat((at0.reshape(1), at1.reshape(1)))
def _forward(self, dummy):
at0 = torch.ops.prim.NumToTensor(dummy.size(0))
at1 = torch.ops.prim.NumToTensor(dummy.size(1))
return torch.cat((at0.reshape(1), at1.reshape(1)))
class TestNumToTensor(utils.TorchGlowTestCase):
def test_NumToTensor_basic(self):
"""Basic test of the PyTorch NumToTensor Node on Glow."""
utils.compare_tracing_methods(
SimpleNumToTensorModule(),
torch.randn(5, 6, 7),
fusible_ops={"prim::NumToTensor"},
scripted=True,
skip_to_glow=True,
)
def test_NumToTensor_float(self):
"""Basic test of the PyTorch NumToTensor Node on Glow."""
utils.compare_tracing_methods(
SimpleNumToTensorModule(True),
torch.randn(5, 6, 7),
fusible_ops={"prim::NumToTensor"},
scripted=True,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
from torch.ao.quantization import (
convert,
DeQuantStub,
fuse_modules,
observer,
prepare,
QConfig,
QuantStub,
)
my_qconfig = QConfig(
activation=observer.default_observer,
weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm3DRelu(utils.TorchGlowTestCase):
def test_batchnorm_relu_basic(self):
"""
Basic test of the PyTorch quantized 3D batchnorm + ReLU Node on Glow.
"""
class SimpleQuantizedBatchNormRelu(nn.Module):
def __init__(self, w, b, m, v):
super(SimpleQuantizedBatchNormRelu, self).__init__()
self.bn = torch.nn.BatchNorm3d(4)
self.relu = torch.nn.ReLU()
self.bn.weight = torch.nn.Parameter(w)
self.bn.bias = torch.nn.Parameter(b)
self.bn.running_mean = m
self.bn.running_var = v
self.q = QuantStub()
self.dq = DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.bn(qx)
qy_relu = self.relu(qy)
y = self.dq(qy_relu)
return y
C = 4
weight = torch.ones(C) + torch.rand(C) * 0.001
bias = torch.rand(C) * 0.0001
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((10, C, 2, 3, 4), requires_grad=False)
model = SimpleQuantizedBatchNormRelu(weight, bias, running_mean, running_var)
model.eval()
model.qconfig = my_qconfig
modules_to_fuse = [["bn", "relu"]]
fuse_modules(model, modules_to_fuse, inplace=True)
prepare(model, inplace=True)
model.forward(inputs)
convert(model, inplace=True)
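# The forward pass above was the calibration step: prepare() inserted
# observers, the forward run let them record activation ranges, and
# convert() then swapped in the quantized modules.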
# PyTorch and Glow quantize slightly differently, so the tolerance must be
# generous. Batchnorm can introduce differences of up to ~1e-2 in rare
# cases; to keep this test from being flaky, atol is set to 0.1 and rtol
# to 1e-5.
utils.compare_tracing_methods(
model,
inputs,
fusible_ops={"quantized::batch_norm3d_relu"},
atol=1e-1,
rtol=1e-5,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleLeakyReluModule(torch.nn.Module):
def __init__(self, negative_slope=1e-2, inplace=False):
super(SimpleLeakyReluModule, self).__init__()
self.negative_slope = negative_slope
self.inplace = inplace
def forward(self, a):
return torch.nn.functional.leaky_relu(
a, negative_slope=self.negative_slope, inplace=self.inplace
)
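# Note: with inplace=True the traced graph contains the in-place variant
# aten::leaky_relu_ (trailing underscore), which test_leaky_relu_inplace
# expects below.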
class TestLeakyRelu(utils.TorchGlowTestCase):
def test_leaky_relu_basic(self):
x = torch.randn(10)
utils.compare_tracing_methods(
SimpleLeakyReluModule(),
x,
fusible_ops={"aten::leaky_relu"},
)
def test_leaky_relu_3d(self):
x = torch.randn(2, 3, 5)
utils.compare_tracing_methods(
SimpleLeakyReluModule(),
x,
fusible_ops={"aten::leaky_relu"},
)
def test_leaky_relu_inplace(self):
x = torch.randn(10)
utils.compare_tracing_methods(
SimpleLeakyReluModule(inplace=True),
x,
fusible_ops={"aten::leaky_relu_"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedAvgPool2DModule(torch.nn.Module):
def __init__(self, scale, zero_point, dtype, kernel_size):
super(SimpleQuantizedAvgPool2DModule, self).__init__()
self.quantize = torch.nn.quantized.Quantize(
scale=scale, zero_point=zero_point, dtype=dtype
)
self.average_pool = torch.nn.AvgPool2d(kernel_size)
def forward(self, inputs):
return torch.nn.quantized.DeQuantize()(self.average_pool(self.quantize(inputs)))
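# The quantize -> avg_pool2d -> dequantize sandwich keeps pooling in the
# quantized domain; the tests below either fuse all three ops into Glow or,
# via a fusion blocklist, fuse only the pooling and leave the q/dq boundary
# ops to PyTorch.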
class TestQuantizedAvgPool(utils.TorchGlowTestCase):
def test_quantized_avgpool(self):
"""Basic test of the PyTorch quantized::avg_pool2d Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAvgPool2DModule(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
fusible_ops={
"aten::avg_pool2d",
"aten::quantize_per_tensor",
"aten::dequantize",
},
)
def test_quantized_avgpool_cut_q_dq(self):
"""Basic test of the PyTorch quantized::avg_pool2d Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedAvgPool2DModule(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 4, 5, 5),
fusible_ops={"aten::avg_pool2d"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleClampMinModel(torch.nn.Module):
def __init__(self, min):
super(SimpleClampMinModel, self).__init__()
self.min = min
def forward(self, input):
return torch.clamp_min(input, self.min)
class TestClamp(utils.TorchGlowTestCase):
def test_clamp_min(self):
"""Test of the PyTorch clamp_min Node on Glow."""
utils.compare_tracing_methods(
SimpleClampMinModel(0.1), torch.randn(7), fusible_ops={"aten::clamp_min"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedAvgPool3DModule(torch.nn.Module):
def __init__(self, scale, zero_point, dtype, kernel_size):
super(SimpleQuantizedAvgPool3DModule, self).__init__()
self.quantize = torch.nn.quantized.Quantize(
scale=scale, zero_point=zero_point, dtype=dtype
)
self.average_pool = torch.nn.AvgPool3d(kernel_size)
def forward(self, inputs):
return torch.nn.quantized.DeQuantize()(self.average_pool(self.quantize(inputs)))
class TestQuantizedAvgPool3D(utils.TorchGlowTestCase):
def test_quantized_avgpool3d(self):
"""Basic test of the PyTorch quantized::avg_pool2d Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAvgPool3DModule(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 2, 4, 5, 5),
fusible_ops={
"aten::avg_pool3d",
"aten::quantize_per_tensor",
"aten::dequantize",
},
)
def test_quantized_avgpool3d_cut_q_dq(self):
"""Basic test of the PyTorch quantized::avg_pool3d Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedAvgPool3DModule(1.0 / 128, 3, torch.quint8, 3),
torch.randn(1, 2, 4, 5, 5),
fusible_ops={"aten::avg_pool3d"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class IndexSelectModule(torch.nn.Module):
def __init__(self, dimension):
super(IndexSelectModule, self).__init__()
self.dimension = dimension
def forward(self, tensor, index):
return torch.index_select(tensor, self.dimension, index)
class TestIndexSelect(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("0-dim", torch.randn(3, 4), 0, torch.tensor([0, 2])),
lambda: ("1-dim", torch.randn(3, 4), 1, torch.tensor([0, 2])),
lambda: ("repeat index", torch.randn(3, 4), 1, torch.tensor([2, 2])),
]
)
def test_index_select(self, _, tensor, dimension, index):
utils.compare_tracing_methods(
IndexSelectModule(dimension),
tensor,
index,
skip_to_glow=True,
fusible_ops={"aten::index_select"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class ExpandAsModel(torch.nn.Module):
def __init__(self, shape):
super(ExpandAsModel, self).__init__()
self.other = torch.randn(shape)
def forward(self, a):
return a.expand_as(self.other)
class TestExpandAs(utils.TorchGlowTestCase):
def test_expand_as(self):
"""Test of the PyTorch expand_as Node on Glow."""
utils.compare_tracing_methods(
ExpandAsModel([2, 2, 4]), torch.randn(1, 4), fusible_ops={"aten::expand_as"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBAddBmmModule(torch.nn.Module):
def __init__(self, alpha=1, beta=1):
super(SimpleBAddBmmModule, self).__init__()
self.alpha = alpha
self.beta = beta
def forward(self, a, b, c):
return (a + a).baddbmm(b, c, beta=self.beta, alpha=self.alpha)
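# For reference: baddbmm computes beta * input + alpha * bmm(batch1, batch2),
# broadcasting `input` against the (B, N, P) result of the batched matmul;
# the stored alpha/beta are passed through so the non-default tests below
# actually exercise them.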
class TestBAddBmm(utils.TorchGlowTestCase):
def test_baddbmm_basic(self):
"""Basic test of the PyTorch baddbmm Node on Glow."""
utils.run_comparison_tests(
SimpleBAddBmmModule(),
(torch.randn(3, 6, 4), torch.randn(3, 6, 10), torch.randn(3, 10, 4)),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_broadcast(self):
"""Test of the PyTorch baddbmm with broadcasting add on Glow."""
utils.run_comparison_tests(
SimpleBAddBmmModule(),
(torch.randn(1, 4), torch.randn(3, 6, 10), torch.randn(3, 10, 4)),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_broadcast_with_alpha_and_beta(self):
"""Test of the PyTorch baddbmm with broadcasting add on Glow, a=2/b=3"""
utils.run_comparison_tests(
SimpleBAddBmmModule(2.0, 3.0),
(torch.randn(1, 4), torch.randn(3, 6, 10), torch.randn(3, 10, 4)),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_basic_tracing(self):
"""Basic test of the PyTorch baddbmm Node on Glow, w/ trace"""
utils.compare_tracing_methods(
SimpleBAddBmmModule(),
torch.randn(2, 3, 5),
torch.randn(2, 3, 9),
torch.randn(2, 9, 5),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_broadcast_tracing(self):
"""Test of the PyTorch baddbmm with broadcasting add on Glow, w/ trace"""
utils.compare_tracing_methods(
SimpleBAddBmmModule(),
torch.randn(1),
torch.randn(3, 6, 9),
torch.randn(3, 9, 5),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_broadcast_with_alpha_and_beta_tracing(self):
"""Test of the PyTorch baddbmm with broadcasting add on Glow, non-1 a/b, w/ trace"""
utils.compare_tracing_methods(
SimpleBAddBmmModule(0.5, 0.3),
torch.randn(1),
torch.randn(3, 6, 9),
torch.randn(3, 9, 5),
fusible_ops={"aten::baddbmm"},
)
def test_baddbmm_broadcast_tracing_error(self):
"""Test of the PyTorch baddbmm with broadcasting add on Glow, w/ trace + error"""
utils.compare_tracing_methods_error(
SimpleBAddBmmModule(),
torch.randn(4),
torch.randn(3, 6, 9),
torch.randn(3, 9, 5),
fusible_ops={"aten::baddbmm"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleSqueezeModel(torch.nn.Module):
def __init__(self, dimension=None, inplace=False):
super(SimpleSqueezeModel, self).__init__()
self.dimension = dimension
self.inplace = inplace
def forward(self, tensor):
if self.inplace:
tensor = tensor + tensor
if self.dimension is not None:
return tensor.squeeze_(self.dimension)
else:
return tensor.squeeze_()
else:
if self.dimension is not None:
return torch.squeeze(tensor + tensor, self.dimension)
else:
return torch.squeeze(tensor + tensor)
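# torch.squeeze with no dim removes every size-1 dimension; with an explicit
# dim it removes only that dimension (and only if it has size 1), and negative
# dims count from the end, as the "with_neg_dim" case below exercises.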
class TestSqueeze(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", SimpleSqueezeModel(), torch.randn(1, 3, 1, 2, 5, 1)),
lambda: ("with_dim", SimpleSqueezeModel(2), torch.randn(1, 3, 1, 2, 5, 1)),
lambda: (
"with_neg_dim",
SimpleSqueezeModel(-1),
torch.randn(1, 3, 1, 2, 5, 1),
),
lambda: (
"inplace",
SimpleSqueezeModel(inplace=True),
torch.randn(1, 3, 1, 2, 5, 1),
),
]
)
def test_squeeze(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class SimpleDynQLinearModule(torch.nn.Module):
def __init__(self, in_features, out_features):
super(SimpleDynQLinearModule, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.m = torch.nn.quantized.dynamic.Linear(self.in_features, self.out_features)
def forward(self, input):
return self.m(input)
class SimpleDynQLinearPerChannelModule(torch.nn.Module):
def __init__(self, in_features, out_features, qconfig):
super(SimpleDynQLinearPerChannelModule, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.mf = torch.nn.Linear(self.in_features, self.out_features)
self.mf.qconfig = qconfig
self.m = torch.nn.quantized.dynamic.Linear.from_float(self.mf)
def forward(self, input):
return self.m(input)
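# In dynamic quantization the weights are quantized ahead of time while
# activations are quantized on the fly at each forward pass, which is why the
# tests below expect the quantized::linear_dynamic op.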
class TestLinear(utils.TorchGlowTestCase):
@unittest.skip(
reason="By default it is asymmetric in PyTorch but symmetric in Glow. Will re-enable it once Glow side diff is landed."
)
def test_linear_basic(self):
"""Basic test of the PyTorch aten::linear op on Glow."""
n = 5
in_features = 4
out_features = 3
input = torch.randn(n, in_features, dtype=torch.float)
utils.compare_tracing_methods(
SimpleDynQLinearModule(in_features, out_features),
input,
fusible_ops={"quantized::linear_dynamic"},
fp16=True,
skip_to_glow=True,
rtol=1e-1,
atol=1e-1,
)
@unittest.skip(
reason="By default it is asymmetric in PyTorch but symmetric in Glow. Will re-enable it once Glow side diff is landed."
)
def test_linear_per_channel(self):
"""Basic test of the PyTorch channel wise aten::linear op on Glow."""
n = 5
in_features = 3
out_features = 4
input = torch.randn(n, in_features, dtype=torch.float)
my_qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.default_dynamic_quant_observer,
weight=torch.ao.quantization.default_per_channel_weight_observer,
)
utils.compare_tracing_methods(
SimpleDynQLinearPerChannelModule(in_features, out_features, my_qconfig),
input,
fusible_ops={"quantized::linear_dynamic"},
fp16=True,
skip_to_glow=True,
rtol=1e-1,
atol=1e-1,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestLSTM(utils.TorchGlowTestCase):
def test_lstm_basic(self):
"""Basic test of the PyTorch lstm Node on Glow."""
class SimpleLSTM(nn.Module):
def __init__(self):
super(SimpleLSTM, self).__init__()
self.rnn = torch.nn.LSTM(12, 10, 1)
w2 = torch.randn(40, 10)
w1 = torch.randn(40, 12)
b1 = torch.randn(40)
b2 = torch.randn(40)
self.rnn.training = False
self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)
self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)
self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)
self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)
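# The (40, 12) and (40, 10) weight shapes come from LSTM stacking its four
# gate matrices (input, forget, cell, output) along dim 0:
# 4 * hidden_size = 4 * 10 = 40 rows.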
def forward(self, inputs, h, c):
return self.rnn(inputs, (h, c))
inputs = torch.randn(10, 3, 12)
h = torch.randn(1, 3, 10)
c = torch.randn(1, 3, 10)
model = SimpleLSTM()
utils.compare_tracing_methods(
model, inputs, h, c, fusible_ops={"aten::lstm"}, skip_to_glow=True
)
def test_lstm_no_bias(self):
"""Basic test of the PyTorch lstm Node with no bias on Glow."""
class SimpleNoBiasLSTM(nn.Module):
def __init__(self):
super(SimpleNoBiasLSTM, self).__init__()
self.rnn = torch.nn.LSTM(5, 10, 1, bias=False)
w2 = torch.randn(40, 10)
w1 = torch.randn(40, 5)
self.rnn.training = False
self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)
self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)
def forward(self, inputs, h, c):
return self.rnn(inputs, (h, c))
inputs = torch.randn(10, 3, 5)
h = torch.randn(1, 3, 10)
c = torch.randn(1, 3, 10)
model = SimpleNoBiasLSTM()
utils.compare_tracing_methods(
model, inputs, h, c, fusible_ops={"aten::lstm"}, skip_to_glow=True
)
def test_lstm_batch_first(self):
"""Basic test of the PyTorch lstm Node with batch first."""
class SimpleBatchFirstLSTM(nn.Module):
def __init__(self):
super(SimpleBatchFirstLSTM, self).__init__()
self.rnn = torch.nn.LSTM(12, 10, 1, batch_first=True)
w2 = torch.randn(40, 10)
w1 = torch.randn(40, 12)
b1 = torch.randn(40)
b2 = torch.randn(40)
self.rnn.training = False
self.rnn.weight_ih_l0 = torch.nn.Parameter(w1)
self.rnn.weight_hh_l0 = torch.nn.Parameter(w2)
self.rnn.bias_ih_l0 = torch.nn.Parameter(b1)
self.rnn.bias_hh_l0 = torch.nn.Parameter(b2)
def forward(self, inputs, h, c):
return self.rnn(inputs, (h, c))
inputs = torch.randn(3, 10, 12)
h = torch.randn(1, 3, 10)
c = torch.randn(1, 3, 10)
model = SimpleBatchFirstLSTM()
utils.compare_tracing_methods(
model, inputs, h, c, fusible_ops={"aten::lstm"}, skip_to_glow=True
)
def test_lstm_bidirectional(self):
"""Bidirectional test of the PyTorch lstm Node on Glow."""
class BidirectionalLSTM(nn.Module):
def __init__(self):
super(BidirectionalLSTM, self).__init__()
self.rnn = torch.nn.LSTM(8, 10, 1, bidirectional=True)
self.rnn.training = False
def forward(self, inputs, h, c):
return self.rnn(inputs, (h, c))
inputs = torch.randn(5, 3, 8)
h = torch.randn(2, 3, 10)
c = torch.randn(2, 3, 10)
model = BidirectionalLSTM()
utils.compare_tracing_methods(
model, inputs, h, c, fusible_ops={"aten::lstm"}, skip_to_glow=True
)
def test_lstm_two_layers(self):
"""2 layer test of the PyTorch lstm Node on Glow."""
class MultipleLayersLSTM(nn.Module):
def __init__(self):
super(MultipleLayersLSTM, self).__init__()
self.rnn = torch.nn.LSTM(10, 20, 2, bidirectional=False)
self.rnn.training = False
def forward(self, inputs, h, c):
return self.rnn(inputs, (h, c))
inputs = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
model = MultipleLayersLSTM()
utils.compare_tracing_methods(
model, inputs, h, c, fusible_ops={"aten::lstm"}, skip_to_glow=True
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class DetachModel(torch.nn.Module):
def __init__(self):
super(DetachModel, self).__init__()
def forward(self, a):
b = a.detach()
return b + b
class TestDetach(utils.TorchGlowTestCase):
def test_detach(self):
"""Test of the PyTorch detach Node on Glow."""
x = torch.randn(5, 6, 7)
x.requires_grad = True
utils.compare_tracing_methods(DetachModel(), x, fusible_ops={"aten::detach"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class CopyModel(torch.nn.Module):
def __init__(self, shape):
super(CopyModel, self).__init__()
self.other = torch.randn(shape)
def forward(self, a):
b = a.copy_(self.other)
return a + b
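# a.copy_(other) requires `other` to be broadcastable to a's shape: copying a
# (1, 3) source into a (4, 4, 3) destination works, while the reverse raises,
# which is exactly what the two test groups below verify.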
class TestCopy(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1x1 => 1x3", [1, 1], [1, 3]),
lambda: ("1x3x5 => 1x3x5", [1, 3, 5], [1, 3, 5]),
lambda: ("1x3 => 4x4x3", [1, 3], [4, 4, 3]),
]
)
def test_copy_(self, _, other_shape, tensor_shape):
"""Test of the PyTorch copy_ method on Glow."""
utils.compare_tracing_methods(
CopyModel(other_shape),
torch.randn(tensor_shape),
fusible_ops={"aten::copy_"},
)
@utils.deterministic_expand(
[
lambda: ("1x1x1 => 1x3", [1, 1, 1], [1, 3]),
lambda: ("1x4 => 4x4x3", [1, 4], [4, 4, 3]),
lambda: ("4x4x3 => 1x3", [4, 4, 3], [1, 3]),
]
)
def test_copy_broadcast_failure(self, _, other_shape, tensor_shape):
"""Test of the PyTorch copy_ method on Glow."""
with self.assertRaises(RuntimeError):
utils.compare_tracing_methods(
CopyModel(other_shape),
torch.randn(tensor_shape),
fusible_ops={"aten::copy_"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm0D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 0D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm1d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
model.eval()
inputs = torch.randn(1, num_channels)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 0D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm1d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm1D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 1D batchnorm Node on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm1d(num_channels)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
running_mean = torch.rand(num_channels)
running_var = torch.rand(num_channels)
model = SimpleBatchNorm(num_channels, running_mean, running_var)
model.eval()
inputs = torch.randn(1, num_channels, 5)
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 1D batchnorm Node with weights and biases on Glow.
"""
class SimpleBatchNorm(nn.Module):
def __init__(self, num_channels, weight, bias, running_mean, running_var):
super(SimpleBatchNorm, self).__init__()
self.batchnorm = nn.BatchNorm1d(num_channels)
self.batchnorm.weight = torch.nn.Parameter(weight)
self.batchnorm.bias = torch.nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
def forward(self, x):
return self.batchnorm(x)
num_channels = 4
weight = torch.rand(num_channels)
bias = torch.rand(num_channels)
running_mean = torch.rand(num_channels)
running_var = torch.ones(num_channels)
inputs = torch.randn(1, num_channels, 5)
model = SimpleBatchNorm(num_channels, weight, bias, running_mean, running_var)
model.eval()
utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch_glow
from tests import utils
class SimpleAttentionModule(torch.nn.Module):
def __init__(self):
super(SimpleAttentionModule, self).__init__()
self.self_attn = torch.nn.MultiheadAttention(32, 8)
def forward(self, inputs):
return self.self_attn(inputs, inputs, inputs)
# Changes introduced by diff D41625335 broke this test.
@unittest.skip("See Task: T139048984")
class TestAttention(utils.TorchGlowTestCase):
def test_attention_basic(self):
"""Basic test of the PyTorch attention Node on Glow."""
inputs = torch.randn(2, 4, 32)
model = SimpleAttentionModule()
torch_glow.enable_ignore_div_rounding_args()
utils.compare_tracing_methods(
model,
inputs,
fusible_ops={
"aten::div",
"aten::mul",
"aten::transpose",
"aten::softmax",
},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import random
import torch
from tests import utils
class SimpleMatmulModule(torch.nn.Module):
def __init__(self):
super(SimpleMatmulModule, self).__init__()
def forward(self, a, b):
return a.matmul(b + b)
class TestMatMul(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1d_1d", torch.randn(4), torch.randn(4)),
lambda: ("1d_2d", torch.randn(4), torch.randn(4, 9)),
lambda: ("1d_3d", torch.randn(4), torch.randn(3, 4, 9)),
lambda: ("1d_4d", torch.randn(4), torch.randn(5, 3, 4, 9)),
lambda: ("2d_1d", torch.randn(9, 4), torch.randn(4)),
lambda: ("3d_1d", torch.randn(6, 9, 4), torch.randn(4)),
lambda: ("4d_1d", torch.randn(2, 6, 9, 4), torch.randn(4)),
]
)
def test_matmul(self, _, left, right):
"""Test of aten::matmul with two 1d inputs Glow."""
utils.compare_tracing_methods(
SimpleMatmulModule(), left, right, fusible_ops={"aten::matmul"}
)
def test_matmul_nd_nd(self):
"""Test of aten::matmul with >2d and >2d inputs Glow."""
def do_test(lhsDims, rhsDims):
lhs = torch.randn(lhsDims)
rhs = torch.randn(rhsDims)
utils.compare_tracing_methods(
SimpleMatmulModule(), lhs, rhs, fusible_ops={"aten::matmul"}
)
def randomDimsOfRank(rank):
dims = []
for _ in range(rank):
dim = random.randint(2, 9)
dims.append(dim)
return dims
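# matmul broadcasts all leading (batch) dimensions and contracts the last two,
# so e.g. a (2, 3, 3, 4) lhs against a (4, 2) rhs yields (2, 3, 3, 2). The
# loops below extend either operand with random batch dims, with and without
# interleaved 1-dims.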
# Dimensions of base tensors that lhs and rhs will be built from
lhsBase = [3, 4]
rhsBase = [4, 2]
for additional_dims in range(3):
extension = randomDimsOfRank(additional_dims)
do_test(extension + lhsBase, rhsBase)
do_test([1] + extension + lhsBase, rhsBase)
do_test(extension + [1] + lhsBase, rhsBase)
do_test(lhsBase, extension + rhsBase)
do_test(lhsBase, [1] + extension + rhsBase)
do_test(lhsBase, extension + [1] + rhsBase)
do_test(extension + lhsBase, extension + rhsBase)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBatchPermutationModule(torch.nn.Module):
def forward(self, input, indices):
return torch.ops._caffe2.BatchPermutation(input + input, indices)
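# _caffe2::BatchPermutation reorders the batch dimension, out[i] = in[idx[i]],
# so the indices passed below form a permutation of range(batch_size).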
class TestBatchPermutation(utils.TorchGlowTestCase):
def test_batch_permutation_basic(self):
"""Basic test of the _caffe2::BatchPermutation Node on Glow."""
x = torch.randn(4, 2, 3)
indices = torch.tensor([1, 3, 0, 2], dtype=torch.int32)
utils.compare_tracing_methods(
SimpleBatchPermutationModule(),
x,
indices,
fusible_ops={"_caffe2::BatchPermutation"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
from torch.ao.quantization import observer, QConfig
my_qconfig = QConfig(
activation=observer.default_observer,
weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm2D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 2D batchnorm Node on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm2d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = torch.nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
qy = self.relu(qy)
y = self.dq(qy)
return y
C = 7
in_scale = 0.102
out_scale = 0.003
in_zero_point = -37
out_zero_point = 3
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((6, C, 43, 52), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 2D batchnorm Node with weights and biases on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm2d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.weight = nn.Parameter(weight)
self.batchnorm.bias = nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
y = self.dq(qy)
return y
C = 11
in_scale = 0.234
out_scale = 0.003
in_zero_point = -10
out_zero_point = -5
weight = torch.ones(C) + torch.rand(C) * 0.001
bias = torch.rand(C) * 0.0001
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((6, C, 33, 42), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
from torch.ao.quantization import observer, QConfig
my_qconfig = QConfig(
activation=observer.default_observer,
weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm3D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
Basic test of the PyTorch 3D batchnorm Node on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm3d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = torch.nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
y = self.dq(qy)
return y
C = 4
in_scale = 0.123
out_scale = 0.004
in_zero_point = 90
out_zero_point = 4
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((5, C, 6, 32, 73), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm3d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.weight = nn.Parameter(weight)
self.batchnorm.bias = nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
y = self.dq(qy)
return y
C = 7
in_scale = 0.0031
out_scale = 0.0047
in_zero_point = -42
out_zero_point = 23
weight = torch.ones(C) + torch.rand(C) * 0.001
bias = torch.rand(C) * 0.0001
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((6, C, 4, 33, 42), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class RepeatModule(torch.nn.Module):
def __init__(self, repeats):
super(RepeatModule, self).__init__()
self.repeats = repeats
def forward(self, tensor):
tensor = tensor + tensor
return tensor.repeat(self.repeats)
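# tensor.repeat(sizes) tiles the tensor along each dimension; when len(sizes)
# exceeds tensor.dim(), leading singleton dimensions are implicitly added, as
# in the "basic_3" case below where a 0-d tensor becomes a (4, 3, 5) result.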
class TestRepeat(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic_1", RepeatModule([4]), torch.randn(3)),
lambda: ("basic_2", RepeatModule([3, 5]), torch.randn(3)),
lambda: ("basic_3", RepeatModule([4, 3, 5]), torch.tensor(3)),
lambda: ("2d_1", RepeatModule([4, 2]), torch.randn(5, 1)),
lambda: ("2d_2", RepeatModule([4, 2, 6]), torch.randn(4, 3)),
lambda: ("3d_1", RepeatModule([4, 4, 2]), torch.randn(6, 3, 4)),
lambda: ("3d_2", RepeatModule([3, 1, 1]), torch.randn(3, 3, 4)),
lambda: ("3d_3", RepeatModule([1, 5, 1]), torch.randn(5, 3, 4)),
# Disabled due to sanitizer checking.
# lambda: ("3d_4", RepeatModule([5, 2, 1, 5, 2, 10]), torch.randn(6, 3, 4)),
]
)
def test_repeat(self, _, module, tensor):
utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::repeat"})
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from tests import utils
class SimpleCosModule(torch.nn.Module):
def __init__(self):
super(SimpleCosModule, self).__init__()
def forward(self, a):
return torch.cos(a + a)
class SimpleSinModule(torch.nn.Module):
def __init__(self):
super(SimpleSinModule, self).__init__()
def forward(self, a):
return torch.sin(a + a)
class SimpleACosModule(torch.nn.Module):
def __init__(self):
super(SimpleACosModule, self).__init__()
def forward(self, a):
return torch.acos(a + a)
class SimpleASinModule(torch.nn.Module):
def __init__(self):
super(SimpleASinModule, self).__init__()
def forward(self, a):
return torch.asin(a + a)
class SimpleATanModule(torch.nn.Module):
def __init__(self):
super(SimpleATanModule, self).__init__()
def forward(self, a):
return torch.atan(a + a)
class TestCos(utils.TorchGlowTestCase):
def test_cos(self, skip_to_glow=False):
# Ensures range is in [-2*pi, 2*pi]
x = 4 * np.pi * (torch.rand(2, 3, 4) - 0.5)
utils.compare_tracing_methods(
SimpleCosModule(), x, fusible_ops={"aten::cos"}, skip_to_glow=skip_to_glow
)
class TestSin(utils.TorchGlowTestCase):
def test_sin(self, skip_to_glow=False):
# Ensures range is in [-2*pi, 2*pi]
x = 4 * np.pi * (torch.rand(2, 3, 4) - 0.5)
utils.compare_tracing_methods(
SimpleSinModule(), x, fusible_ops={"aten::sin"}, skip_to_glow=skip_to_glow
)
class TestACos(utils.TorchGlowTestCase):
def test_acos(self, skip_to_glow=False):
x = torch.rand(2, 3, 4) - 0.5 # Ensures range is in [-1,1]
utils.compare_tracing_methods(
SimpleACosModule(), x, fusible_ops={"aten::acos"}, skip_to_glow=skip_to_glow
)
class TestASin(utils.TorchGlowTestCase):
def test_asin(self, skip_to_glow=False):
x = torch.rand(2, 3, 4) - 0.5 # Ensures range is in [-1,1]
utils.compare_tracing_methods(
SimpleASinModule(), x, fusible_ops={"aten::asin"}, skip_to_glow=skip_to_glow
)
class TestATan(utils.TorchGlowTestCase):
def test_atan(self, skip_to_glow=False):
x = torch.randn(2, 3, 4)
utils.compare_tracing_methods(
SimpleATanModule(), x, fusible_ops={"aten::atan"}, skip_to_glow=skip_to_glow
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleMaxPool2dTest(torch.nn.Module):
def __init__(self, kernel_size, padding=0, ceil_mode=False):
super(SimpleMaxPool2dTest, self).__init__()
self.kernel_size = kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
def forward(self, inputs):
return F.max_pool2d(
inputs,
kernel_size=self.kernel_size,
padding=self.padding,
ceil_mode=self.ceil_mode,
)
class TestMaxPool2d(utils.TorchGlowTestCase):
def test_max_pool2d_basic(self):
"""Basic test of the PyTorch max_pool2d Node on Glow."""
utils.compare_tracing_methods(
SimpleMaxPool2dTest(3),
torch.randn(1, 4, 5, 5),
fusible_ops={"aten::max_pool2d"},
)
def test_max_pool2d_with_args(self):
"""Test of the PyTorch max_pool2d Node with arguments on Glow."""
utils.compare_tracing_methods(
SimpleMaxPool2dTest(7, 3),
torch.randn(1, 4, 10, 10),
fusible_ops={"aten::max_pool2d"},
)
def test_max_pool2d_ceil_mode(self):
"""Test of the PyTorch max_pool2d Node with ceil_mode on Glow."""
utils.compare_tracing_methods(
SimpleMaxPool2dTest(7, 1, ceil_mode=True),
torch.randn(1, 4, 16, 16),
fusible_ops={"aten::max_pool2d"},
)
def test_max_pool2d_ceil_mode_strong_1(self):
"""Stronger test of the PyTorch max_pool2d Node with ceil_mode on Glow."""
utils.compare_tracing_methods(
SimpleMaxPool2dTest(5, 1, ceil_mode=True),
torch.randn(1, 5, 33, 33),
fusible_ops={"aten::max_pool2d"},
)
def test_max_pool2d_ceil_mode_strong_2(self):
"""Stronger test of the PyTorch max_pool2d Node with ceil_mode on Glow."""
utils.compare_tracing_methods(
SimpleMaxPool2dTest(8, 2, ceil_mode=True),
torch.randn(1, 3, 41, 41),
fusible_ops={"aten::max_pool2d"},
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests import utils
class SimpleSoftPlusModel(torch.nn.Module):
def __init__(self):
super(SimpleSoftPlusModel, self).__init__()
def forward(self, tensor):
tensor = tensor + tensor
return F.softplus(tensor)
class TestSoftPlus(utils.TorchGlowTestCase):
def test_softplus(self):
"""Basic test of the PyTorch aten::softplus Node on Glow."""
utils.compare_tracing_methods(
SimpleSoftPlusModel(),
torch.randn(4, 3),
fusible_ops={"aten::softplus"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleMaskedFillModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleMaskedFillModule, self).__init__()
self.inplace = inplace
def forward(self, tensor, mask):
if self.inplace:
other = tensor + tensor
other.masked_fill_(mask, 42.0)
return other
else:
return torch.masked_fill(tensor + tensor, mask, 42.0)
class TestMaskedFill(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic",
SimpleMaskedFillModule(),
torch.randn([3]),
torch.tensor([True, False, True], dtype=torch.bool),
),
lambda: (
"broadcasted_unit_dim",
SimpleMaskedFillModule(),
torch.randn([4, 1, 3]),
torch.tensor([True, True, True], dtype=torch.bool),
),
lambda: (
"broadcasted_multi_dim",
SimpleMaskedFillModule(),
torch.randn([2, 4, 3, 3]),
torch.tensor(
[[[[True, False, True]]], [[[True, False, True]]]], dtype=torch.bool
),
),
lambda: (
"inplace",
SimpleMaskedFillModule(True),
torch.randn([3]),
torch.tensor([True, False, True], dtype=torch.bool),
),
]
)
def test_masked_fill(self, _, module, tensor, mask):
utils.compare_tracing_methods(
module, tensor, mask, fusible_ops={"aten::masked_fill"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tests import utils
class GatherModule(torch.nn.Module):
def __init__(self, dimension):
super(GatherModule, self).__init__()
self.dimension = dimension
def forward(self, tensor, index):
return torch.gather(tensor, self.dimension, index)
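# Illustrative note: for a 2-D input with dim=0, gather computes
#   out[i][j] = tensor[index[i][j]][j]
# e.g. torch.gather(torch.tensor([[1, 2], [3, 4]]), 0, torch.tensor([[0, 1], [0, 1]]))
# returns tensor([[1, 4], [1, 4]]).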
class TestGather(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"basic-1dim",
torch.tensor([1, 2, 3, 4]),
0,
torch.tensor([0, 0, 1, 0]),
),
lambda: (
"0-dim",
torch.tensor([[1, 2], [3, 4]]),
0,
torch.tensor([[0, 1], [0, 1]]),
),
lambda: (
"1-dim",
torch.tensor([[1, 2], [3, 4]]),
1,
torch.tensor([[0, 0], [0, 0]]),
),
lambda: (
"2-dim",
torch.randn(3, 4, 2),
2,
torch.empty(3, 4, 2).random_(2).long(),
),
]
)
def test_gather(self, _, tensor, dimension, index):
utils.compare_tracing_methods(
GatherModule(dimension),
tensor,
index,
skip_to_glow=True,
fusible_ops={"aten::gather"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import torch
from tests import utils
class TestQuantizedConv3dRelu(utils.TorchGlowTestCase):
def _test_quantized_conv3d_relu_packed(self, groups):
"""Basic test of PyTorch quantized::conv3d_relu Node with packed weights on Glow."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv3d(3, 3, kernel_size=3, stride=(2, 2, 2), groups=groups)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
            # Due to the off-by-one error, we cannot let the weights, bias,
            # and input be totally random.
conv.weight.set_(
torch.arange(72 / groups, dtype=torch.float).reshape(
[3, 3 // groups, 2, 2, 2]
)
/ 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={
"aten::quantize_per_tensor",
"quantized::conv3d_relu",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_conv3d_relu_packed_groupwise(self):
"""PyTorch groupwise quantized::conv3d_relu Node with packed weights on Glow."""
self._test_quantized_conv3d_relu_packed(groups=3)
def test_quantized_conv3d_relu_packed_nongroupwise(self):
"""PyTorch vanilla quantized::conv3d_relu Node with packed weights on Glow."""
self._test_quantized_conv3d_relu_packed(groups=1)
def test_quantized_conv3d_relu_packed_cut_q_dq(self):
"""Basic test of PyTorch quantized::conv3d_relu Node with packed weights on Glow, with quantize and dequantize excluded."""
with torch.no_grad():
x = torch.tensor(range(5), dtype=torch.float)
x = torch.cat((x, x, x, x, x))
x = torch.cat((x, x, x))
x = torch.cat((x, x, x))
x = torch.reshape(x, [1, 3, 3, 5, 5])
q = torch.nn.quantized.Quantize(1, 2, torch.quint8)
conv = torch.nn.Conv3d(3, 3, kernel_size=3, stride=(2, 2, 2), groups=1)
relu = torch.nn.ReLU()
dq = torch.nn.quantized.DeQuantize()
            # Due to the off-by-one error, we cannot let the weights, bias,
            # and input be totally random.
conv.weight.set_(
torch.arange(72, dtype=torch.float).reshape([3, 3, 2, 2, 2]) / 3
)
conv.bias.data.fill_(2)
model = torch.nn.Sequential(
OrderedDict(
[
("quantize", q),
("conv1", conv),
("relu1", relu),
("dequantize", dq),
]
)
)
model.eval()
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
# Fuse conv and relu to conv_relu
model = torch.ao.quantization.fuse_modules(model, [["conv1", "relu1"]])
torch.ao.quantization.prepare(model, inplace=True)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
x,
fusible_ops={"quantized::conv3d_relu"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleCeilModule(torch.nn.Module):
def forward(self, a, b):
c = a + b
return torch.ceil(c)
class TestCeil(utils.TorchGlowTestCase):
def test_ceil(self):
"""Basic test of the PyTorch Ceil Node on Glow."""
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
utils.compare_tracing_methods(
SimpleCeilModule(), x, y, fusible_ops={"aten::ceil"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleUpsampleModel(torch.nn.Module):
def __init__(self, *args, **kwargs):
super(SimpleUpsampleModel, self).__init__()
self.args = args
self.kwargs = kwargs
def forward(self, tensor):
return torch.nn.Upsample(*self.args, **self.kwargs)(tensor)
class TestUpsample(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: (
"3d_2x_size_nearest",
SimpleUpsampleModel(size=(8, 10, 12)),
torch.rand(2, 3, 4, 5, 6),
),
lambda: (
"3d_2x_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=(2, 2, 2)),
torch.rand(2, 3, 4, 5, 6),
),
lambda: (
"3d_2x_single_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=2),
torch.rand(2, 3, 4, 5, 6),
),
lambda: (
"3d_not_2x_single_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=5),
torch.rand(2, 3, 4, 5, 6),
),
lambda: (
"3d_not_2x_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=(1, 2, 3)),
torch.rand(2, 3, 4, 5, 6),
),
lambda: (
"3d_not_2x_size_nearest",
SimpleUpsampleModel(size=(10, 12, 13)),
torch.rand(2, 3, 4, 5, 6),
),
]
)
def test_upsample_nearest3d(self, name, module, tensor):
utils.compare_tracing_methods(
module,
tensor,
fusible_ops=["aten::upsample_nearest3d"],
)
@utils.deterministic_expand(
[
lambda: (
"2d_2x_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=(2, 2)),
torch.rand(1, 1, 3, 3),
),
lambda: (
"2d_not_2x_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=(3, 4)),
torch.rand(1, 1, 3, 3),
),
lambda: (
"2d_2x_single_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=2),
torch.rand(1, 1, 3, 3),
),
lambda: (
"2d_not_2x_single_scale_factor_nearest",
SimpleUpsampleModel(scale_factor=3),
torch.rand(1, 1, 3, 3),
),
lambda: (
"2d_2x_size_nearest",
SimpleUpsampleModel(size=(6, 6)),
torch.rand(1, 1, 3, 3),
),
lambda: (
"2d_not_2x_size_nearest",
SimpleUpsampleModel(size=(4, 8)),
torch.rand(1, 1, 3, 3),
),
]
)
def test_upsample_nearest2d(self, name, module, tensor):
utils.compare_tracing_methods(
module,
tensor,
fusible_ops=["aten::upsample_nearest2d"],
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleReciprocalModel(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleReciprocalModel, self).__init__()
self.inplace = inplace
def forward(self, tensor):
other = tensor + tensor
return other.reciprocal_() if self.inplace else torch.reciprocal(other)
class TestReciprocal(utils.TorchGlowTestCase):
def test_reciprocal(self):
"""Test of the PyTorch reciprocal Node on Glow."""
utils.compare_tracing_methods(
SimpleReciprocalModel(), torch.randn(4), fusible_ops={"aten::reciprocal"}
)
def test_inplace_reciprocal(self):
"""Test of the PyTorch inplace reciprocal Node on Glow."""
# Expect fuser to out-of-place the operator
utils.compare_tracing_methods(
SimpleReciprocalModel(inplace=True),
torch.randn(4),
fusible_ops={"aten::reciprocal"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleCumSumModule(torch.nn.Module):
def __init__(self, dim):
super(SimpleCumSumModule, self).__init__()
self.dim = dim
def forward(self, tensor):
return torch.cumsum(tensor, self.dim)
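# Illustrative note: cumsum computes a running sum along `dim`, e.g.
#   torch.cumsum(torch.tensor([1.0, 2.0, 3.0]), 0)  # -> tensor([1., 3., 6.])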
class TestCumSum(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("1", torch.randn(1), 0),
lambda: ("2", torch.randn(2), 0),
lambda: ("20", torch.randn(20), 0),
lambda: ("3x4_0", torch.randn(3, 4), 0),
lambda: ("3x4_1", torch.randn(3, 4), 1),
lambda: ("3x4_-1", torch.randn(3, 4), -1),
lambda: ("3x4_-2", torch.randn(3, 4), -2),
lambda: ("3x4x5_0", torch.randn(3, 4, 5), 0),
lambda: ("3x4x5_1", torch.randn(3, 4, 5), 1),
lambda: ("3x4x5_2", torch.randn(3, 4, 5), 2),
lambda: ("3x4x5_-1", torch.randn(3, 4, 5), -1),
lambda: ("3x4x5_-2", torch.randn(3, 4, 5), -2),
lambda: ("3x4x5_-3", torch.randn(3, 4, 5), -3),
lambda: ("6x5x4x3_0", torch.randn(6, 5, 4, 3), 0),
lambda: ("6x5x4x3_1", torch.randn(6, 5, 4, 3), 1),
lambda: ("6x5x4x3_2", torch.randn(6, 5, 4, 3), 2),
lambda: ("6x5x4x3_3", torch.randn(6, 5, 4, 3), 3),
lambda: ("6x5x4x3_-1", torch.randn(6, 5, 4, 3), -1),
lambda: ("6x5x4x3_-2", torch.randn(6, 5, 4, 3), -2),
lambda: ("6x5x4x3_-3", torch.randn(6, 5, 4, 3), -3),
lambda: ("6x5x4x3_-4", torch.randn(6, 5, 4, 3), -4),
lambda: (
"3x4_0,int64",
                torch.randint(-10, 10, (3, 4), dtype=torch.int64),
0,
),
]
)
def test_cumsum(self, _, tensor, dim):
utils.compare_tracing_methods(
SimpleCumSumModule(dim), tensor, fusible_ops={"aten::cumsum"}
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleBitwiseNotModule(torch.nn.Module):
def __init__(self):
super(SimpleBitwiseNotModule, self).__init__()
def forward(self, a):
b = torch.bitwise_not(a)
return torch.bitwise_not(b)
class TestBitwiseNot(utils.TorchGlowTestCase):
@utils.deterministic_expand(
[
lambda: ("basic", torch.tensor([-1, -2, 3], dtype=torch.int32)),
lambda: ("basic_int64", torch.tensor([-1, -2, 3], dtype=torch.int64)),
lambda: (
"rand_int",
torch.randint(-1000000000, 1000000000, (2, 3), dtype=torch.int64),
),
lambda: ("bool_ts", torch.zeros((2, 2, 3), dtype=torch.bool)),
lambda: ("bool_fs", torch.ones((2, 2, 3), dtype=torch.bool)),
lambda: ("bool_tf", torch.tensor([False, True], dtype=torch.bool)),
]
)
def test_bitwise_not(self, _, x):
"""Tests of the PyTorch Bitwise Not Node on Glow."""
utils.compare_tracing_methods(
SimpleBitwiseNotModule(),
x,
fusible_ops={"aten::bitwise_not"},
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleQuantizedAddModule(torch.nn.Module):
def __init__(self, left_quantization, right_quantization, scale, zero_point):
super(SimpleQuantizedAddModule, self).__init__()
self.left_quantization = left_quantization
self.right_quantization = right_quantization
self.scale = scale
self.zero_point = zero_point
def forward(self, left, right):
return torch.nn.quantized.DeQuantize()(
torch.ops.quantized.add(
self.left_quantization(left),
self.right_quantization(right),
scale=self.scale,
zero_point=self.zero_point,
)
)
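# Illustrative note: a quantized tensor with scale s and zero_point z represents
# the float value s * (q - z); quantized::add adds the dequantized operands and
# requantizes the result using the `scale` and `zero_point` passed above.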
class TestQuantizedAdd(utils.TorchGlowTestCase):
def test_quantized_add_zerooffset(self):
"""Basic test of the PyTorch quantized::add Node on Glow with zero offset."""
utils.compare_tracing_methods(
SimpleQuantizedAddModule(
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=0.3, zero_point=0, dtype=torch.quint8
),
0.05,
0,
),
torch.tensor([1, 2, 3, 4], dtype=torch.float32),
torch.tensor([5, 6, 7, 8], dtype=torch.float32),
fusible_ops={
"quantized::add",
"aten::quantize_per_tensor",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_add(self):
"""Basic test of the PyTorch quantized::add Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAddModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
fusible_ops={
"quantized::add",
"aten::quantize_per_tensor",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_add_with_broadcast(self):
"""Basic test of the PyTorch quantized::add Node on Glow."""
utils.compare_tracing_methods(
SimpleQuantizedAddModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.randn([1, 10]),
torch.randn([10]),
fusible_ops={
"quantized::add",
"aten::quantize_per_tensor",
"aten::dequantize",
},
skip_to_glow=True,
)
def test_quantized_add_cut_q_dq(self):
"""Basic test of the PyTorch quantized::add Node on Glow, with quantize and dequantize excluded."""
utils.compare_tracing_methods(
SimpleQuantizedAddModule(
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=5, dtype=torch.quint8
),
torch.nn.quantized.Quantize(
scale=1.0 / 128, zero_point=10, dtype=torch.quint8
),
1.0 / 128,
3,
),
torch.randn([5, 5]),
torch.randn([5, 5]),
fusible_ops={"quantized::add"},
fusion_blocklist=["aten::quantize_per_tensor", "aten::dequantize"],
skip_to_glow=True,
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch.jit
import torch_glow
from tests import utils
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.q = torch.nn.quantized.Quantize(
scale=0.05, zero_point=1, dtype=torch.quint8
)
self.dq = torch.nn.quantized.DeQuantize()
def forward(self, x, y):
qx = self.q(x)
qy = self.q(y)
qz = torch.ops.quantized.add(qx, qy, 0.08, 0)
return self.dq(qz)
@unittest.skip(reason="This test only works on HW")
class TestToGlowNNPIModelDumping(utils.TorchGlowTestCase):
def test_serialization(self):
with torch.no_grad():
x = torch.randn([1, 4, 4, 4], dtype=torch.float32)
y = torch.randn([1, 4, 4, 4], dtype=torch.float32)
model = Bar()
model = torch.jit.trace(model, (x, y))
spec = torch_glow.CompilationSpec()
spec_settings = spec.get_settings()
spec_settings.set_glow_backend("NNPI")
            # Enable serialization in this spec
spec_settings.set_enable_serialize(True)
compilation_group = torch_glow.CompilationGroup()
compilation_group_settings = compilation_group.get_settings()
compilation_group_settings.set_replication_count(1)
compilation_group_settings.backend_specific_opts_insert(
"NNPI_IceCores", "1"
)
compilation_group.input_sets_append(
torch_glow.input_specs_from_tensors([x, y])
)
spec.compilation_groups_append(compilation_group)
torch_glow.disableFusionPass()
torch_glow.enable_convert_to_fp16()
            # Enable global serialization, then compile (serialize)
            # the model and save it
torch_glow.enable_dump_serialized_model()
glow_mod = torch_glow.to_glow(model, spec)
res1 = glow_mod(x, y)
torch.jit.save(glow_mod, "/tmp/serialize_to_glow.pt")
            # Enable global deserialization and disable serialization,
            # then load (deserialize) the model into loaded_glow_mod
torch_glow.enable_deserialize()
torch_glow.disable_dump_serialized_model()
loaded_glow_mod = torch.jit.load("/tmp/serialize_to_glow.pt")
res2 = loaded_glow_mod(x, y)
assert torch.allclose(res1, res2, 1e-5, 1e-5)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from tests import utils
class TestQuantizedConv2dBigStrideSmallKernel(utils.TorchGlowTestCase):
    # These tests should be run manually on an NNPI card; otherwise
    # buck test will only run them on the emulator.
supported_backends = {"NNPI"}
fused_2d_expect = {
"aten::quantize_per_tensor",
"quantized::conv2d",
"aten::dequantize",
}
fused_3d_expect = {
"aten::quantize_per_tensor",
"quantized::conv3d",
"aten::dequantize",
}
@utils.deterministic_expand(
[
lambda: (
"2d_stride_bigger_in_one_dim",
torch.nn.Conv2d(8, 4, [1, 1], groups=1, stride=[2, 1]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_in_multi_dims",
torch.nn.Conv2d(8, 4, [1, 1], groups=1, stride=[2, 2]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_in_multi_groups",
torch.nn.Conv2d(8, 4, [1, 1], groups=4, stride=[2, 1]),
torch.randn([1, 8, 8, 8]),
),
lambda: (
"2d_stride_bigger_strong_test_1",
torch.nn.Conv2d(4, 8, [2, 3], groups=2, stride=[1, 4]),
torch.randn([1, 4, 29, 23]),
),
lambda: (
"2d_stride_bigger_strong_test_2",
torch.nn.Conv2d(6, 8, [7, 3], groups=2, stride=[8, 4]),
torch.randn([2, 6, 47, 35]),
),
]
)
def test_qconv_2d(self, name, conv, tensor):
"""Test of quantized conv2d whose stride is bigger than kernel."""
with torch.no_grad():
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(tensor)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
tensor,
fusible_ops=self.fused_2d_expect,
                # We set the atol & rtol of this test very high because we
                # know there are going to be off-by-one issues and we don't
                # want to trigger them.
                # However, even with such loose atol & rtol, this is still
                # good enough to verify the functionality is enabled correctly.
atol=0.1,
rtol=0.1,
)
    # Skipped 3d tests
@utils.deterministic_expand(
[
lambda: (
"3d_stride_bigger_in_one_dim",
torch.nn.Conv3d(8, 4, kernel_size=2, groups=1, stride=1),
torch.randn([1, 8, 16, 8, 8]),
),
]
)
@unittest.skip(reason="qconv3d channelwise is not yet supported on NNPI")
def test_qconv_3d(self, name, conv, tensor):
"""Test of quantized conv3d whose stride is bigger than kernel."""
with torch.no_grad():
model = torch.ao.quantization.QuantWrapper(conv)
model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
torch.ao.quantization.prepare(model, inplace=True)
# Calibration
model.forward(tensor)
torch.ao.quantization.convert(model, inplace=True)
utils.compare_tracing_methods(
model,
tensor,
fusible_ops=self.fused_3d_expect,
atol=0.1,
rtol=0.1,
)
|
# COPIED FROM: torchvision/transforms/functional.py
import numbers
from collections.abc import Iterable
import numpy as np
import torch
from PIL import Image
try:
import accimage
except ImportError:
accimage = None
def _is_pil_image(img):
return isinstance(img, Image.Image)
def _is_numpy(img):
    return isinstance(img, np.ndarray)
def _is_numpy_image(img):
    # Needed by to_tensor below; matches torchvision's definition.
    return img.ndim in {2, 3}
def _is_tensor_image(img):
return torch.is_tensor(img) and img.ndimension() == 3
def resize(img, size, interpolation=Image.BILINEAR):
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio; i.e., if height > width, then the image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
"""
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
raise TypeError("Got inappropriate size arg: {}".format(size))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::-1], interpolation)
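# Illustrative note: with an int size, the smaller edge is matched and the
# aspect ratio is kept, e.g. a 640x480 image resized with size=256 becomes
# 341x256; with an (h, w) sequence, that exact size is used.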
def center_crop(img, output_size):
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
w, h = img.size
th, tw = output_size
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(img, i, j, th, tw)
def crop(img, i, j, h, w):
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
return img.crop((j, i, j + w, i + h))
def to_tensor(pic):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not (_is_pil_image(pic) or _is_numpy(pic)):
raise TypeError("pic should be PIL Image or ndarray. Got {}".format(type(pic)))
if _is_numpy(pic) and not _is_numpy_image(pic):
raise ValueError(
"pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim)
)
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
# handle PIL Image
if pic.mode == "I":
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == "I;16":
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif pic.mode == "F":
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif pic.mode == "1":
img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == "YCbCr":
nchannel = 3
elif pic.mode == "I;16":
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
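# Illustrative sketch (not executed): converting an 8-bit RGB PIL image yields
# a float CHW tensor scaled to [0, 1], e.g.
#   t = to_tensor(Image.new("RGB", (4, 4)))  # t.shape == (3, 4, 4), dtype float32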
def normalize(tensor, mean, std, inplace=False):
"""Normalize a tensor image with mean and standard deviation.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not _is_tensor_image(tensor):
raise TypeError("tensor is not a torch image.")
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
return tensor
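# Illustrative sketch (not executed): normalizing with the usual ImageNet
# statistics, assuming `img` is a (C, H, W) float tensor:
#   out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])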
|
# COPIED FROM: torchvision/models/resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
"ResNet",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when
# stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when
# stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to
# https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.size(0), -1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = model_zoo.load_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def resnet152(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs
)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNeXt-50 32x4d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet(
"resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x8d model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet(
"resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
"""Constructs a Wide ResNet-50-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
"""Constructs a Wide ResNet-101-2 model.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
import torch_glow
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = nn.Linear(10, 2)
def forward(self, x):
return self.linear(x)
torch._C._jit_set_profiling_mode(True)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
m = Model()
m_jit = torch.jit.script(m)
x = torch.randn(10)
# No Glow fusion node
print("initial jit ir")
print(m_jit.graph_for(x))
m_jit(x)
m_jit(x)
m_jit(x)
# Contains Glow fusion node
print("final jit ir")
print(m_jit.graph_for(x))
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch_glow
x = torch.randn(4)
y = torch.randn(4)
@torch.jit.script
def foo(a, b):
c = a.mul(b)
a = c.mul(c)
a = c.mul(a)
d = c.div(a)
return d
print("original jit ir")
print(foo.graph_for(x, y))
jit_res = foo(x, y)
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
@torch.jit.script
def foo_glow(a, b):
return foo(a, b)
print("glow jit ir")
print(foo_glow.graph_for(x, y))
jit_glow_res = foo_glow(x, y)
print("jit_res")
print(jit_res)
print("jit_glow_res")
print(jit_glow_res)
assert torch.allclose(jit_res, jit_glow_res)
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import torch
import torch_glow
import utils.torchvision_fake.resnet as resnet
import utils.torchvision_fake.transforms as torchvisionTransforms
from PIL import Image
def load_image(image_path):
image = Image.open(image_path).convert("RGB")
transformed_image = transform_image(image)
return torch.reshape(transformed_image, (1, 3, 224, 224))
def transform_image(image):
"""
Given a PIL image, transform it to a normalized tensor for classification.
"""
image = torchvisionTransforms.resize(image, 256)
image = torchvisionTransforms.center_crop(image, 224)
image = torchvisionTransforms.to_tensor(image)
image = torchvisionTransforms.normalize(
image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
return image
def run_model(model, image, use_glow, backend, print_graph):
if use_glow:
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
if backend:
torch_glow.setGlowBackend(backend)
with torch.no_grad():
traced = torch.jit.trace(model, image)
if print_graph:
print(traced.graph_for(image))
all_outputs = traced(image)
topk = all_outputs.topk(5)
return (topk[1], topk[0])
def run():
parser = argparse.ArgumentParser()
parser.add_argument(
"--image",
type=str,
required=True,
help="Location of the image to be classified",
)
parser.add_argument("--k", type=int, default=5, help="how many results to show")
parser.add_argument(
"--skip_glow", action="store_true", default=False, help="Don't run using Glow"
)
parser.add_argument(
"--print_graph", action="store_true", default=False, help="Don't run using Glow"
)
parser.add_argument(
"--backend",
action="store",
default=None,
help="Select Glow backend to run. Default is not to request a specific backend.",
)
args = parser.parse_args()
image = load_image(args.image)
model = resnet.resnet18(pretrained=True, progress=True)
model.eval()
use_glow = not args.skip_glow
(indices, scores) = run_model(
model,
image,
use_glow=use_glow,
backend=args.backend,
print_graph=args.print_graph,
)
print("rank", "class", "P")
for i in range(args.k):
print(i, int(indices[0][i]), float(scores[0][i]))
run()
|
import collections
import collections.abc
import copy
from contextlib import contextmanager
from enum import Enum
from typing import Any, Iterable, List, Mapping, Optional, Tuple, Union
import torch
import torch_glow
__all__ = [
"to_glow",
"to_glow_selective",
"get_submod_inputs",
"CompilationSpec",
"CompilationGroup",
"InputSpec",
"CompilationSpecSettings",
"FuserSettings",
"input_spec_from_tensor",
"input_specs_from_tensors",
"lower",
"Backend",
"onnx_capture",
]
CompilationSpec = torch.classes.glow.CompilationSpec
CompilationGroup = torch.classes.glow.CompilationGroup
InputSpec = torch.classes.glow.InputSpec
CompilationSpecSettings = torch.classes.glow.CompilationSpecSettings
FuserSettings = torch.classes.glow.FuserSettings
class Backend(Enum):
Interpreter = "Interpreter"
NNPI = "NNPI"
CPU = "CPU"
def __str__(self):
return self.value
@contextmanager
def onnx_capture(filename_prefix=None, zip_mode=True, write_without_randomize=False):
try:
torch_glow.disableFusionPass()
torch_glow.enable_write_to_onnx()
if write_without_randomize:
torch_glow.enable_write_without_randomize()
if zip_mode:
torch_glow.enable_onnx_zip_mode()
if filename_prefix is not None:
torch_glow.set_onnx_file_name_prefix(filename_prefix)
yield
finally:
torch_glow.disable_write_without_randomize()
torch_glow.disable_write_to_onnx()
torch_glow.disable_onnx_zip_mode()
torch_glow.set_onnx_file_name_prefix("")
def input_spec_from_tensor(tensor: torch.Tensor) -> InputSpec:
input_spec = InputSpec()
input_spec.set_same_as(tensor)
return input_spec
def input_specs_from_tensors(tensors: List[torch.Tensor]) -> List[InputSpec]:
return [input_spec_from_tensor(tensor) for tensor in tensors]
def lower(
model: torch.nn.Module,
example_inputs: Iterable[torch.Tensor],
backend: Union[str, Backend],
convert_to_fp16: Optional[bool] = None,
num_devices: Optional[int] = None,
replication_count: Optional[int] = None,
backend_specific_options: Mapping[str, str] = None,
):
r"""Lower a model to Glow
This is the simplest interface to lower a model. For more complex lowering,
the to_glow function should be used.
Return:
A copy of the model that has been lowered to Glow and will run on
Glow backend devices
"""
if not isinstance(model, torch.jit.ScriptModule):
try:
model = torch.jit.trace(model, example_inputs)
except RuntimeError as exc:
print(exc.args[0])
raise RuntimeError(
"Model failed tracing! Try tracing/scripting by yourself first."
)
spec = CompilationSpec()
spec.get_settings().set_glow_backend(Backend(backend).value)
compilation_group = CompilationGroup()
if convert_to_fp16 is not None:
compilation_group.get_settings().set_convert_to_fp16(convert_to_fp16)
if num_devices is not None:
compilation_group.get_settings().set_num_devices(num_devices)
if replication_count is not None:
compilation_group.get_settings().set_replication_count(replication_count)
if backend_specific_options is not None:
for opt_key in backend_specific_options:
compilation_group.get_settings().backend_specific_opts_insert(
opt_key, backend_specific_options[opt_key]
)
compilation_group.input_sets_append(input_specs_from_tensors(example_inputs))
spec.compilation_groups_append(compilation_group)
return to_glow(model, {"forward": spec})
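# Minimal usage sketch (not executed; assumes a torch_glow build with the
# Interpreter backend available):
#   model = torch.nn.Linear(4, 2)
#   x = torch.randn(1, 4)
#   glow_model = lower(model, (x,), backend="Interpreter")
#   out = glow_model(x)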
def to_glow(model, method_compile_spec):
r"""Lower a model to Glow
    to_glow is a wrapper around torch._C._jit_to_backend which lowers the
    specified module `mod` to Glow using the MethodCompileSpec
    `method_compile_spec`. MethodCompileSpec is a dictionary from a method name
    in `mod`, such as 'forward', to the CompilationSpec for that method.
Args:
model: Model to be lowered to glow
        method_compile_spec: Either a dictionary from method name to
                             CompilationSpec, or just a CompilationSpec, in
                             which case the method name is assumed to be "forward"
Return:
A copy of the model that has been lowered to Glow and will run on
Glow backend devices
"""
if not isinstance(method_compile_spec, collections.abc.Mapping):
method_compile_spec = {"forward": method_compile_spec}
return torch._C._jit_to_backend("glow", model, method_compile_spec)
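# Minimal usage sketch (not executed; `traced` and `x` are assumed to be a
# ScriptModule and a sample input):
#   spec = CompilationSpec()
#   spec.get_settings().set_glow_backend("Interpreter")
#   group = CompilationGroup()
#   group.input_sets_append(input_specs_from_tensors([x]))
#   spec.compilation_groups_append(group)
#   lowered = to_glow(traced, {"forward": spec})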
def check_module_names(module_names):
"""Checks that module names don't overlap at all"""
assert "" not in module_names, "Use to_glow to lower top level module"
for path1 in module_names:
for path2 in module_names:
if path1 == path2:
continue
assert (
path1 not in path2
), f"Can't to_glow a module nested inside another to_glow module, \
found {path2} inside of {path1}"
def get_submodule(mod, path):
path = path.split(".")
assert len(path) > 0
found_mod = mod
for item in path:
found_mod = getattr(found_mod, item)
return found_mod
def set_submodule(mod, path, submod):
path = path.split(".")
assert len(path) > 0
found_mod = mod
for item in path[:-1]:
found_mod = getattr(found_mod, item)
setattr(found_mod, path[-1], submod)
def get_submod_inputs(
mod: torch.nn.Module, path: str, example_inputs: Any
) -> Tuple[torch.Tensor]:
r"""Get the inputs of a submodule given the top-level model
and its input.
    Registers a forward pre-hook that records the inputs of the submodule
    and then runs the model to trigger the hook.
Args:
mod: top-level model
path: path to a submodule
example_inputs: inputs to the top-level model
Return:
inputs: Tuple[torch.Tensor]
"""
submod = get_submodule(mod, path)
sub_inputs = None
def get_inputs(self: torch.nn.Module, inputs: Any):
nonlocal sub_inputs
sub_inputs = inputs
handle = submod.register_forward_pre_hook(get_inputs)
mod(*example_inputs)
handle.remove()
return sub_inputs
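# Minimal usage sketch (not executed; "layer1" is a hypothetical submodule path):
#   sub_inputs = get_submod_inputs(model, "layer1", (x,))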
def to_glow_selective(model, specs_and_examples, inplace=False):
r"""Selectively lowers submodules of the given module to Glow.
Instead of using to_glow to lower an entire module to Glow,
to_glow_selective can be used to selectively find and replace submodules in
the given module with a version of the module that is traced and lowered
to Glow. Each specified submodule is lowered independently and so will be
a separate compilation unit in Glow.
Args:
model: top-level model to be selectively lowered
        specs_and_examples: A dictionary with keys that name submodules
                            recursively from model and values that are a
                            tuple of (CompilationSpec, example_inputs) where
                            example_inputs are inputs that are used to trace
                            the submodule.
        inplace: Carry out model transformations in-place; the original
                 module is mutated
Return:
Model with selectively lowered submodules
"""
check_module_names(list(specs_and_examples.keys()))
if not inplace:
model = copy.deepcopy(model)
if isinstance(model, torch.jit._script.RecursiveScriptModule):
spec_list, path_list = [], []
submod_idx = 0
for path, spec in specs_and_examples.items():
spec_list.append(spec)
path_list.append(path)
def to_glow_helper(submod):
nonlocal submod_idx
res_model = to_glow(submod, {"forward": spec_list[submod_idx]})
submod_idx += 1
return res_model
model = torch._C._jit_to_backend_selective(model, to_glow_helper, path_list)
else:
for path, (spec, example_inputs) in specs_and_examples.items():
submod = get_submodule(model, path)
submod = torch.jit.trace(submod, example_inputs)
submod = to_glow(submod, {"forward": spec})
set_submodule(model, path, submod)
return model
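# Minimal usage sketch (not executed; "block1" is a hypothetical submodule
# name, `spec` and `x` a CompilationSpec and sample input as above):
#   lowered = to_glow_selective(model, {"block1": (spec, (x,))})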
|
from ._torch_glow import *
from .to_glow import *
|
#!/usr/bin/env python
import os
import re
import sys
import shutil
import subprocess
from pathlib import Path
from setuptools import setup, find_packages
import distutils.command.clean
import torch
from tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def _run_cmd(cmd):
try:
return subprocess.check_output(cmd, cwd=ROOT_DIR).decode('ascii').strip()
except Exception:
return None
def _get_version(sha):
version = '0.11.0a0'
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha is not None:
version += '+' + sha[:7]
return version
def _make_version_file(version, sha):
sha = 'Unknown' if sha is None else sha
version_path = ROOT_DIR / 'torchaudio' / 'version.py'
with open(version_path, 'w') as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = '{sha}'\n")
def _get_pytorch_version():
if 'PYTORCH_VERSION' in os.environ:
return f"torch=={os.environ['PYTORCH_VERSION']}"
return 'torch'
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchaudio extension
for path in (ROOT_DIR / 'torchaudio').glob('**/*.so'):
print(f'removing \'{path}\'')
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / 'build',
]
for path in build_dirs:
if path.exists():
print(f'removing \'{path}\' (and everything under it)')
shutil.rmtree(str(path), ignore_errors=True)
def _get_packages(branch_name, tag):
exclude = [
"build*",
"test*",
"torchaudio.csrc*",
"third_party*",
"tools*",
]
exclude_prototype = False
if branch_name is not None and branch_name.startswith('release/'):
exclude_prototype = True
if tag is not None and re.match(r'v[\d.]+(-rc\d+)?', tag):
exclude_prototype = True
if exclude_prototype:
print('Excluding torchaudio.prototype from the package.')
exclude.append("torchaudio.prototype")
return find_packages(exclude=exclude)
def _init_submodule():
print(' --- Initializing submodules')
try:
subprocess.check_call(['git', 'submodule', 'init'])
subprocess.check_call(['git', 'submodule', 'update'])
except Exception:
        print(' --- Submodule initialization failed')
print('Please run:\n\tgit submodule update --init --recursive')
sys.exit(1)
print(' --- Initialized submodule')
def _parse_sox_sources():
sox_dir = ROOT_DIR / 'third_party' / 'sox'
cmake_file = sox_dir / 'CMakeLists.txt'
archive_dir = sox_dir / 'archives'
archive_dir.mkdir(exist_ok=True)
with open(cmake_file, 'r') as file_:
for line in file_:
match = re.match(r'^\s*URL\s+(https:\/\/.+)$', line)
if match:
url = match.group(1)
path = archive_dir / os.path.basename(url)
yield path, url
def _fetch_sox_archives():
for dest, url in _parse_sox_sources():
if not dest.exists():
print(f' --- Fetching {os.path.basename(dest)}')
torch.hub.download_url_to_file(url, dest, progress=False)
def _fetch_third_party_libraries():
if not (ROOT_DIR / 'third_party' / 'kaldi' / 'submodule' / 'CMakeLists.txt').exists():
_init_submodule()
if os.name != 'nt':
_fetch_sox_archives()
def _main():
sha = _run_cmd(['git', 'rev-parse', 'HEAD'])
branch = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
tag = _run_cmd(['git', 'describe', '--tags', '--exact-match', '@'])
print('-- Git branch:', branch)
print('-- Git SHA:', sha)
print('-- Git tag:', tag)
pytorch_package_dep = _get_pytorch_version()
print('-- PyTorch dependency:', pytorch_package_dep)
version = _get_version(sha)
print('-- Building version', version)
_make_version_file(version, sha)
_fetch_third_party_libraries()
setup(
name="torchaudio",
version=version,
description="An audio package for PyTorch",
url="https://github.com/pytorch/audio",
author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
author_email="[email protected]",
classifiers=[
"Environment :: Plugins",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: C++",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
packages=_get_packages(branch, tag),
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
'build_ext': setup_helpers.CMakeBuild,
'clean': clean,
},
install_requires=[pytorch_package_dep],
zip_safe=False,
)
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
"""Convert a Wav2Vec2/HuBERT model published by fairseq into torchaudio format
Examples
```
python convert_fairseq_models.py \
--input-file hubert_base_ls960.pt \
--output-file hubert_fairseq_base_ls960.pth
python convert_fairseq_models.py \
--input-file hubert_large_ll60k.pt \
--output-file hubert_fairseq_large_ll60k.pth
python convert_fairseq_models.py \
--input-file hubert_large_ll60k_finetune_ls960.pt \
--output-file hubert_fairseq_large_ll60k_asr_ls960.pth
python convert_fairseq_models.py \
--input-file hubert_xtralarge_ll60k.pt \
--output-file hubert_fairseq_xlarge_ll60k.pth
python convert_fairseq_models.py \
--input-file hubert_xtralarge_ll60k_finetune_ls960.pt \
--output-file hubert_fairseq_xlarge_ll60k_asr_ls960.pth
"""
import argparse
# Note: torch and fairseq are imported inside functions rather than at global scope because they are slow to import.
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--input-file', required=True,
help='Input model file.'
)
parser.add_argument(
        '--output-file', required=True,
help='Output model file.'
)
parser.add_argument(
'--dict-dir',
help=(
'Directory where letter vocabulary file, `dict.ltr.txt`, is found. '
'Required when loading wav2vec2 model. '
'https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt'
)
)
return parser.parse_args()
def _load_model(input_file, dict_dir):
import fairseq
overrides = {} if dict_dir is None else {'data': dict_dir}
models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[input_file], arg_overrides=overrides,
)
return models[0]
def _import_model(model):
from torchaudio.models.wav2vec2.utils import import_fairseq_model
if model.__class__.__name__ in ['HubertCtc', 'Wav2VecCtc']:
model = model.w2v_encoder
model = import_fairseq_model(model)
return model
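# Fine-tuned fairseq checkpoints ('HubertCtc' / 'Wav2VecCtc') wrap the acoustic
# model together with a CTC head; the encoder stored under `w2v_encoder` is what
# gets converted, so it is unwrapped before calling import_fairseq_model.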
def _main(args):
import torch
model = _load_model(args.input_file, args.dict_dir)
model = _import_model(model)
torch.save(model.state_dict(), args.output_file)
if __name__ == '__main__':
_main(_parse_args())
|
#!/usr/bin/env python3
"""Convert the fairseq models available in voxpopuli repo https://github.com/facebookresearch/voxpopuli
The available checkpoints should open with fairseq.
But the following error cannot be resolved with almost any version of fairseq.
https://github.com/facebookresearch/voxpopuli/issues/29
So this script manually parse the checkpoint file and reconstruct the model.
Examples
```
python convert_voxpopuli_models.py \
--input-file wav2vec2_base_10k_ft_fr.pt \
--output-file wav2vec2_voxpopuli_base_10k_asr_fr.pt
```
"""
def _parse_args():
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--input-file', required=True,
help='Input checkpoint file.'
)
parser.add_argument(
        '--output-file', required=True,
help='Output model file.'
)
return parser.parse_args()
def _removeprefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
return s
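# Minimal stand-in for str.removeprefix, which is only available on
# Python 3.9+. For example:
#     _removeprefix('w2v_encoder.proj.bias', 'w2v_encoder.')  ->  'proj.bias'
#     _removeprefix('mask_emb', 'w2v_encoder.')               ->  'mask_emb'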
def _load(input_file):
import torch
from omegaconf import OmegaConf
data = torch.load(input_file)
cfg = OmegaConf.to_container(data['cfg'])
for key in list(cfg.keys()):
if key != 'model':
del cfg[key]
if 'w2v_args' in cfg['model']:
del cfg['model']['w2v_args'][key]
state_dict = {_removeprefix(k, 'w2v_encoder.'): v for k, v in data['model'].items()}
return cfg, state_dict
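# Everything but the 'model' section of the stored training configuration is
# discarded, and the 'w2v_encoder.' prefix that fine-tuned checkpoints prepend
# to parameter names is stripped from the state-dict keys.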
def _parse_model_param(cfg, state_dict):
key_mapping = {
"extractor_mode": "extractor_mode",
"conv_feature_layers": "extractor_conv_layer_config",
"conv_bias": "extractor_conv_bias",
"encoder_embed_dim": "encoder_embed_dim",
"dropout_input": "encoder_projection_dropout",
"conv_pos": "encoder_pos_conv_kernel",
"conv_pos_groups": "encoder_pos_conv_groups",
"encoder_layers": "encoder_num_layers",
"encoder_attention_heads": "encoder_num_heads",
"attention_dropout": "encoder_attention_dropout",
"encoder_ffn_embed_dim": "encoder_ff_interm_features",
"activation_dropout": "encoder_ff_interm_dropout",
"dropout": "encoder_dropout",
"layer_norm_first": "encoder_layer_norm_first",
"layerdrop": "encoder_layer_drop",
"encoder_layerdrop": "encoder_layer_drop",
}
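    # Left: fairseq config names; right: torchaudio.models.wav2vec2_model
    # keyword arguments. Both 'layerdrop' spellings map to the same target
    # because different fairseq configs use different names, so the mapping
    # has 16 entries but produces at most 15 distinct parameters (checked by
    # the assert below).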
params = {}
src_dicts = [cfg['model']]
if 'w2v_args' in cfg['model']:
src_dicts.append(cfg['model']['w2v_args']['model'])
for src, tgt in key_mapping.items():
for model_cfg in src_dicts:
if src in model_cfg:
params[tgt] = model_cfg[src]
break
if params["extractor_mode"] == "default":
params["extractor_mode"] = "group_norm"
    # the following line is commented out to resolve a lint warning; uncomment it before running the script
# params["extractor_conv_layer_config"] = eval(params["extractor_conv_layer_config"])
assert len(params) == 15
params['aux_num_out'] = state_dict['proj.bias'].numel() if 'proj.bias' in state_dict else None
return params
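# 'aux_num_out' sets the output size of the optional ASR head: checkpoints with
# a 'proj.bias' entry are fine-tuned models whose projection gives the
# vocabulary size, while pre-training-only checkpoints get no auxiliary output
# layer.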
def _main(args):
import json
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_fairseq import _convert_state_dict as _convert
cfg, state_dict = _load(args.input_file)
params = _parse_model_param(cfg, state_dict)
print(json.dumps(params, indent=4))
model = torchaudio.models.wav2vec2_model(**params)
model.load_state_dict(_convert(state_dict))
torch.save(model.state_dict(), args.output_file)
if __name__ == '__main__':
_main(_parse_args())
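# Loading sketch for the converted file (the filename is illustrative, and
# `params` stands for the dict that _main prints as JSON):
#
#     import torch
#     import torchaudio
#     model = torchaudio.models.wav2vec2_model(**params)
#     model.load_state_dict(torch.load('wav2vec2_voxpopuli_base_10k_asr_fr.pt'))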
|