code
stringlengths 208
42.9k
| apis
list | extract_api
stringlengths 129
69.9k
|
---|---|---|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.core import Tensor
from official.vision.detection import layers
def get_focal_loss(
    logits: Tensor,
    labels: Tensor,
    ignore_label: int = -1,
    background: int = 0,
    alpha: float = 0.5,
    gamma: float = 0,
    norm_type: str = "fg",
) -> Tensor:
    r"""Focal Loss for Dense Object Detection:
    <https://arxiv.org/pdf/1708.02002.pdf>

    .. math::
        FL(p_t) = -\alpha_t(1-p_t)^\gamma \log(p_t)

    Args:
        logits (Tensor):
            the predicted logits with the shape of :math:`(B, A, C)`
        labels (Tensor):
            the assigned labels of boxes with shape of :math:`(B, A)`
        ignore_label (int):
            the value of ignore class. Default: -1
        background (int):
            the value of background class. Default: 0
        alpha (float):
            parameter to mitigate class imbalance. Default: 0.5
        gamma (float):
            parameter to mitigate easy/hard loss imbalance. Default: 0
        norm_type (str): current support "fg", "none":
            "fg": loss will be normalized by number of fore-ground samples
            "none": not norm

    Returns:
        the calculated focal loss.
    """
    # Class ids start at 1; broadcasting labels against [1..C] yields
    # per-class positive/negative masks.
    class_ids = F.arange(1, logits.shape[2] + 1)
    labels = F.add_axis(labels, axis=2)
    probs = F.sigmoid(logits)
    # Focal modulation: down-weight easy examples via (1 - p)^gamma / p^gamma.
    pos_factor = (1 - probs) ** gamma * layers.logsigmoid(logits)
    neg_factor = probs ** gamma * layers.logsigmoid(-logits)
    pos_term = -(labels == class_ids) * pos_factor * alpha
    neg_term = (
        -(labels != class_ids) * (labels != ignore_label) * neg_factor * (1 - alpha)
    )
    total = (pos_term + neg_term).sum()
    if norm_type == "none":
        return total
    if norm_type == "fg":
        # Normalize by the number of fore-ground anchors (at least 1).
        fg_mask = (labels != background) * (labels != ignore_label)
        return total / F.maximum(fg_mask.sum(), 1)
    raise NotImplementedError
def get_smooth_l1_loss(
    pred_bbox: Tensor,
    gt_bbox: Tensor,
    labels: Tensor,
    beta: int = 1,
    background: int = 0,
    ignore_label: int = -1,
    norm_type: str = "fg",
) -> Tensor:
    r"""Smooth l1 loss used in RetinaNet.

    Args:
        pred_bbox (Tensor):
            the predicted bbox with the shape of :math:`(B, A, 4)`
        gt_bbox (Tensor):
            the ground-truth bbox with the shape of :math:`(B, A, 4)`
        labels (Tensor):
            the assigned labels of boxes with shape of :math:`(B, A)`
        beta (int):
            the parameter of smooth l1 loss. Default: 1
        background (int):
            the value of background class. Default: 0
        ignore_label (int):
            the value of ignore class. Default: -1
        norm_type (str): current support "fg", "all", "none":
            "fg": loss will be normalized by number of fore-ground samples
            "all": loss will be normalized by number of all samples
            "none": not norm

    Returns:
        the calculated smooth l1 loss.
    """
    # Flatten the batch/anchor dims so each row is one box.
    pred_flat = pred_bbox.reshape(-1, 4)
    gt_flat = gt_bbox.reshape(-1, 4)
    labels_flat = labels.reshape(-1)
    # Only fore-ground boxes contribute to the loss.
    fg_mask = (labels_flat != background) * (labels_flat != ignore_label)
    per_box = get_smooth_l1_base(pred_flat, gt_flat, beta)
    loss = (per_box.sum(axis=1) * fg_mask).sum()
    if norm_type == "none":
        return loss
    if norm_type == "fg":
        denom = fg_mask.sum()
    elif norm_type == "all":
        denom = (labels_flat != ignore_label).sum()
    else:
        raise NotImplementedError
    # Guard against an empty denominator.
    return loss / F.maximum(denom, 1)
def get_smooth_l1_base(pred_bbox: Tensor, gt_bbox: Tensor, beta: float) -> Tensor:
    r"""Element-wise smooth l1 term between predicted and ground-truth boxes.

    Args:
        pred_bbox (Tensor):
            the predicted bbox with the shape of :math:`(N, 4)`
        gt_bbox (Tensor):
            the ground-truth bbox with the shape of :math:`(N, 4)`
        beta (int):
            the parameter of smooth l1 loss.

    Returns:
        the calculated smooth l1 loss.
    """
    diff = pred_bbox - gt_bbox
    abs_diff = F.abs(diff)
    # Degenerate beta: plain l1 loss.
    if beta < 1e-5:
        return abs_diff
    quadratic = 0.5 * diff ** 2 / beta
    linear = abs_diff - 0.5 * beta
    # FIXME: F.where cannot handle 0-shape tensor yet
    # loss = F.where(abs_diff < beta, quadratic, linear)
    inside = abs_diff < beta
    return quadratic * inside + linear * (1 - inside)
def softmax_loss(scores: Tensor, labels: Tensor, ignore_label: int = -1) -> Tensor:
    """Cross-entropy loss over class scores, averaging only non-ignored rows.

    Args:
        scores (Tensor): predicted class scores, shape :math:`(N, C)`.
        labels (Tensor): target class indices, shape :math:`(N,)`.
        ignore_label (int): label value excluded from the loss. Default: -1

    Returns:
        the mean cross-entropy over non-ignored samples (denominator >= 1).
    """
    # Subtract the (gradient-detached) row-wise max for numerical stability.
    max_scores = F.zero_grad(scores.max(axis=1, keepdims=True))
    # BUGFIX: use out-of-place subtraction; the original `scores -= max_scores`
    # could modify the caller's tensor in place.
    scores = scores - max_scores
    log_prob = scores - F.log(F.exp(scores).sum(axis=1, keepdims=True))
    mask = labels != ignore_label
    # Ignored rows are remapped to label 0 for the gather, then masked out.
    vlabels = labels * mask
    loss = -(F.indexing_one_hot(log_prob, vlabels.astype("int32"), 1) * mask).sum()
    loss = loss / F.maximum(mask.sum(), 1)
    return loss
|
[
"megengine.functional.add_axis",
"megengine.functional.arange",
"megengine.functional.sigmoid",
"megengine.functional.abs",
"megengine.functional.exp"
] |
[((1623, 1655), 'megengine.functional.arange', 'F.arange', (['(1)', '(logits.shape[2] + 1)'], {}), '(1, logits.shape[2] + 1)\n', (1631, 1655), True, 'import megengine.functional as F\n'), ((1670, 1696), 'megengine.functional.add_axis', 'F.add_axis', (['labels'], {'axis': '(2)'}), '(labels, axis=2)\n', (1680, 1696), True, 'import megengine.functional as F\n'), ((1710, 1727), 'megengine.functional.sigmoid', 'F.sigmoid', (['logits'], {}), '(logits)\n', (1719, 1727), True, 'import megengine.functional as F\n'), ((4412, 4420), 'megengine.functional.abs', 'F.abs', (['x'], {}), '(x)\n', (4417, 4420), True, 'import megengine.functional as F\n'), ((1767, 1792), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['logits'], {}), '(logits)\n', (1784, 1792), False, 'from official.vision.detection import layers\n'), ((1826, 1852), 'official.vision.detection.layers.logsigmoid', 'layers.logsigmoid', (['(-logits)'], {}), '(-logits)\n', (1843, 1852), False, 'from official.vision.detection import layers\n'), ((4977, 4990), 'megengine.functional.exp', 'F.exp', (['scores'], {}), '(scores)\n', (4982, 4990), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
from basecls.models.repvgg import RepVGGBlock
@pytest.mark.parametrize("w_in", [32, 64])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("groups", [1, 2, 4])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(w_in, w_out, stride, groups, se_r, act_name):
    """Check that deploy-mode conversion of a RepVGGBlock preserves outputs."""
    # Build a training-mode block and switch it to eval so the comparison
    # is deterministic (no BN statistic updates).
    m = RepVGGBlock(w_in, w_out, stride, groups, se_r, act_name, deploy=False)
    assert isinstance(m, M.Module)
    m.eval()
    x = mge.random.uniform(size=(2, w_in, 8, 8))
    y0 = m(x)
    # Fuse the parallel branches into the single deploy-mode conv.
    m = RepVGGBlock.convert_to_deploy(m)
    y1 = m(x)
    # Fused and unfused blocks must agree up to numerical tolerance.
    np.testing.assert_allclose(y1.numpy(), y0.numpy(), rtol=1e-4, atol=1e-6)
|
[
"megengine.random.uniform"
] |
[((218, 259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32, 64]'], {}), "('w_in', [32, 64])\n", (241, 259), False, 'import pytest\n'), ((261, 299), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[64]'], {}), "('w_out', [64])\n", (284, 299), False, 'import pytest\n'), ((301, 342), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (324, 342), False, 'import pytest\n'), ((344, 388), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups"""', '[1, 2, 4]'], {}), "('groups', [1, 2, 4])\n", (367, 388), False, 'import pytest\n'), ((390, 434), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (413, 434), False, 'import pytest\n'), ((436, 481), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (459, 481), False, 'import pytest\n'), ((551, 621), 'basecls.models.repvgg.RepVGGBlock', 'RepVGGBlock', (['w_in', 'w_out', 'stride', 'groups', 'se_r', 'act_name'], {'deploy': '(False)'}), '(w_in, w_out, stride, groups, se_r, act_name, deploy=False)\n', (562, 621), False, 'from basecls.models.repvgg import RepVGGBlock\n'), ((679, 719), 'megengine.random.uniform', 'mge.random.uniform', ([], {'size': '(2, w_in, 8, 8)'}), '(size=(2, w_in, 8, 8))\n', (697, 719), True, 'import megengine as mge\n'), ((743, 775), 'basecls.models.repvgg.RepVGGBlock.convert_to_deploy', 'RepVGGBlock.convert_to_deploy', (['m'], {}), '(m)\n', (772, 775), False, 'from basecls.models.repvgg import RepVGGBlock\n')]
|
import math
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
# ================================= GRU Implementation ==========================================================
class GRUCell(M.Module):
    """
    An implementation of GRUCell.

    One GRU time step. Both the input-to-hidden and hidden-to-hidden
    projections are fused, each emitting ``3 * hidden_size`` features
    packed as [reset | update | candidate].
    """
    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Fused gate projections: 3 gates per step.
        self.ih = M.Linear(input_size, 3 * hidden_size, bias=bias)
        self.hh = M.Linear(hidden_size, 3 * hidden_size, bias=bias)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniformly initialize all parameters in [-1/sqrt(H), 1/sqrt(H)]."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            M.init.uniform_(w, -std, std)
    def forward(self, x, hidden):
        """Run one GRU step; returns the next hidden state."""
        # Flatten leading dims so the Linear layers see a 2-D batch.
        x = F.reshape(x, (-1, x.shape[1]))
        gate_x = self.ih(x)
        gate_h = self.hh(hidden)
        # Split fused projections into the three gate pre-activations.
        i_r, i_i, i_n = F.split(gate_x, 3, axis=1)
        h_r, h_i, h_n = F.split(gate_h, 3, axis=1)
        resetgate = F.sigmoid(i_r + h_r)
        inputgate = F.sigmoid(i_i + h_i)
        newgate = F.tanh(i_n + (resetgate * h_n))
        # h' = n + z * (h - n)  ==  (1 - z) * n + z * h
        hy = newgate + inputgate * (hidden - newgate)
        return hy
class GRU(M.Module):
    """
    An implementation of GRUModule.

    Stacks ``num_layers`` :class:`GRUCell` layers and unrolls them over the
    time dimension of the input.

    Args:
        input_size: feature size of each input step.
        hidden_size: feature size of the hidden state.
        num_layers: number of stacked GRU cells.
        bias: whether the cells use bias terms. Default: True
        batch_first: if True the input layout is (batch, time, feature),
            otherwise (time, batch, feature). Default: False
        dropout: dropout probability applied to the output of every layer
            except the last. Default: 0
    """
    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias=True,
        batch_first=False,
        dropout=0,
    ):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        # First cell maps input features to hidden size; the rest are
        # hidden-to-hidden.
        self.rnn_cell_list = []
        self.rnn_cell_list.append(GRUCell(self.input_size, self.hidden_size, self.bias))
        for l in range(1, self.num_layers):
            self.rnn_cell_list.append(
                GRUCell(self.hidden_size, self.hidden_size, self.bias)
            )
    def forward(self, input, hx=None):
        """Unroll the stacked GRU over time.

        Args:
            input: (batch, time, feature) if ``batch_first`` else
                (time, batch, feature).
            hx: optional initial hidden states of shape
                (num_layers, batch, hidden_size); zeros if None.

        Returns:
            the last layer's output at every time step, stacked along the
            batch-first or time-first axis to match the input layout.
        """
        if hx is None:
            batch = input.shape[0] if self.batch_first else input.shape[1]
            h0 = F.zeros((self.num_layers, batch, self.hidden_size))
        else:
            h0 = hx
        outs = []
        hidden = list()
        for layer in range(self.num_layers):
            hidden.append(h0[layer, :, :])
        length = input.shape[1] if self.batch_first else input.shape[0]
        for t in range(length):
            for layer in range(self.num_layers):
                if layer == 0:
                    if self.batch_first:
                        hidden_l = self.rnn_cell_list[layer](
                            input[:, t, :], hidden[layer]
                        )
                    else:
                        hidden_l = self.rnn_cell_list[layer](
                            input[t, :, :], hidden[layer]
                        )
                else:
                    hidden_l = self.rnn_cell_list[layer](
                        hidden[layer - 1], hidden[layer]
                    )
                # BUGFIX: compare layer indices with `!=`, not `is not`.
                # Identity comparison of ints only "works" via CPython's
                # small-int caching and silently breaks for large values.
                if self.dropout and (layer != self.num_layers - 1):
                    hidden_l = F.dropout(hidden_l, self.dropout)
                hidden[layer] = hidden_l
            outs.append(hidden_l)
        if self.batch_first:
            output = F.stack(outs, axis=1)
        else:
            output = F.stack(outs, axis=0)
        return output
# ================================= LSTM Implementation ==========================================================
class LSTMCell(M.Module):
    """
    An implementation of LSTMCell.

    One LSTM time step. The input-to-hidden and hidden-to-hidden
    projections are fused, each emitting ``4 * hidden_size`` features
    packed as [input | forget | cell | output] gates.
    """
    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Fused gate projections: 4 gates per step.
        self.x2h = M.Linear(input_size, 4 * hidden_size, bias=bias)
        self.h2h = M.Linear(hidden_size, 4 * hidden_size, bias=bias)
        self.reset_parameters()
    def reset_parameters(self):
        """Uniformly initialize all parameters in [-1/sqrt(H), 1/sqrt(H)]."""
        std = 1.0 / math.sqrt(self.hidden_size)
        for w in self.parameters():
            M.init.uniform_(w, -std, std)
    def forward(self, x, hidden):
        """Run one LSTM step on (h, c); returns the next (h, c) pair."""
        hx, cx = hidden
        # Flatten leading dims so the Linear layers see a 2-D batch.
        x = F.reshape(x, (-1, x.shape[1]))
        gates = self.x2h(x) + self.h2h(hx)
        ingate, forgetgate, cellgate, outgate = F.split(gates, 4, axis=1)
        ingate = F.sigmoid(ingate)
        forgetgate = F.sigmoid(forgetgate)
        cellgate = F.tanh(cellgate)
        outgate = F.sigmoid(outgate)
        # c' = f * c + i * g ; h' = o * tanh(c')
        cy = F.mul(cx, forgetgate) + F.mul(ingate, cellgate)
        hy = F.mul(outgate, F.tanh(cy))
        return (hy, cy)
class LSTM(M.Module):
    """
    An implementation of LSTMModule.

    Stacks ``num_layers`` :class:`LSTMCell` layers and unrolls them over the
    time dimension of the input.

    Args:
        input_size: feature size of each input step.
        hidden_size: feature size of the hidden/cell states.
        num_layers: number of stacked LSTM cells.
        bias: whether the cells use bias terms. Default: True
        batch_first: if True the input layout is (batch, time, feature),
            otherwise (time, batch, feature). Default: False
        dropout: dropout probability applied to the (h, c) output of every
            layer except the last. Default: 0
    """
    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        bias=True,
        batch_first=False,
        dropout=0,
    ):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        # First cell maps input features to hidden size; the rest are
        # hidden-to-hidden.
        self.rnn_cell_list = []
        self.rnn_cell_list.append(
            LSTMCell(self.input_size, self.hidden_size, self.bias)
        )
        for l in range(1, self.num_layers):
            self.rnn_cell_list.append(
                LSTMCell(self.hidden_size, self.hidden_size, self.bias)
            )
    def forward(self, input, hx=None):
        """Unroll the stacked LSTM over time.

        Args:
            input: (batch, time, feature) if ``batch_first`` else
                (time, batch, feature).
            hx: optional (h0, c0) pair, each of shape
                (num_layers, batch, hidden_size); zeros if None.

        Returns:
            the last layer's hidden state at every time step, stacked along
            the batch-first or time-first axis to match the input layout.
        """
        if hx is None:
            batch = input.shape[0] if self.batch_first else input.shape[1]
            h0 = F.zeros((self.num_layers, batch, self.hidden_size))
            c0 = F.zeros((self.num_layers, batch, self.hidden_size))
        else:
            h0 = hx[0]
            c0 = hx[1]
        outs = []
        hidden = list()
        for layer in range(self.num_layers):
            hidden.append((h0[layer, :, :], c0[layer, :, :]))
        length = input.shape[1] if self.batch_first else input.shape[0]
        for t in range(length):
            for layer in range(self.num_layers):
                if layer == 0:
                    inp = input[:, t, :] if self.batch_first else input[t, :, :]
                    hidden_l = self.rnn_cell_list[layer](
                        inp, (hidden[layer][0], hidden[layer][1])
                    )
                else:
                    hidden_l = self.rnn_cell_list[layer](
                        hidden[layer - 1][0], (hidden[layer][0], hidden[layer][1])
                    )
                # BUGFIX: compare layer indices with `!=`, not `is not`.
                # Identity comparison of ints only "works" via CPython's
                # small-int caching and silently breaks for large values.
                if self.dropout and (layer != self.num_layers - 1):
                    hidden_l = (
                        F.dropout(hidden_l[0], self.dropout),
                        F.dropout(hidden_l[1], self.dropout),
                    )
                hidden[layer] = hidden_l
            outs.append(hidden_l[0])
        if self.batch_first:
            output = F.stack(outs, axis=1)
        else:
            output = F.stack(outs, axis=0)
        return output
|
[
"megengine.functional.zeros",
"megengine.functional.mul",
"megengine.functional.sigmoid",
"megengine.functional.split",
"megengine.functional.stack",
"megengine.module.Linear",
"megengine.functional.dropout",
"megengine.functional.tanh",
"megengine.functional.reshape",
"megengine.module.init.uniform_"
] |
[((520, 568), 'megengine.module.Linear', 'M.Linear', (['input_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(input_size, 3 * hidden_size, bias=bias)\n', (528, 568), True, 'import megengine.module as M\n'), ((587, 636), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(3 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 3 * hidden_size, bias=bias)\n', (595, 636), True, 'import megengine.module as M\n'), ((876, 906), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (885, 906), True, 'import megengine.functional as F\n'), ((994, 1020), 'megengine.functional.split', 'F.split', (['gate_x', '(3)'], {'axis': '(1)'}), '(gate_x, 3, axis=1)\n', (1001, 1020), True, 'import megengine.functional as F\n'), ((1045, 1071), 'megengine.functional.split', 'F.split', (['gate_h', '(3)'], {'axis': '(1)'}), '(gate_h, 3, axis=1)\n', (1052, 1071), True, 'import megengine.functional as F\n'), ((1093, 1113), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_r + h_r)'], {}), '(i_r + h_r)\n', (1102, 1113), True, 'import megengine.functional as F\n'), ((1134, 1154), 'megengine.functional.sigmoid', 'F.sigmoid', (['(i_i + h_i)'], {}), '(i_i + h_i)\n', (1143, 1154), True, 'import megengine.functional as F\n'), ((1173, 1202), 'megengine.functional.tanh', 'F.tanh', (['(i_n + resetgate * h_n)'], {}), '(i_n + resetgate * h_n)\n', (1179, 1202), True, 'import megengine.functional as F\n'), ((3899, 3947), 'megengine.module.Linear', 'M.Linear', (['input_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(input_size, 4 * hidden_size, bias=bias)\n', (3907, 3947), True, 'import megengine.module as M\n'), ((3967, 4016), 'megengine.module.Linear', 'M.Linear', (['hidden_size', '(4 * hidden_size)'], {'bias': 'bias'}), '(hidden_size, 4 * hidden_size, bias=bias)\n', (3975, 4016), True, 'import megengine.module as M\n'), ((4281, 4311), 'megengine.functional.reshape', 'F.reshape', (['x', '(-1, x.shape[1])'], {}), '(x, (-1, x.shape[1]))\n', (4290, 
4311), True, 'import megengine.functional as F\n'), ((4405, 4430), 'megengine.functional.split', 'F.split', (['gates', '(4)'], {'axis': '(1)'}), '(gates, 4, axis=1)\n', (4412, 4430), True, 'import megengine.functional as F\n'), ((4449, 4466), 'megengine.functional.sigmoid', 'F.sigmoid', (['ingate'], {}), '(ingate)\n', (4458, 4466), True, 'import megengine.functional as F\n'), ((4488, 4509), 'megengine.functional.sigmoid', 'F.sigmoid', (['forgetgate'], {}), '(forgetgate)\n', (4497, 4509), True, 'import megengine.functional as F\n'), ((4529, 4545), 'megengine.functional.tanh', 'F.tanh', (['cellgate'], {}), '(cellgate)\n', (4535, 4545), True, 'import megengine.functional as F\n'), ((4564, 4582), 'megengine.functional.sigmoid', 'F.sigmoid', (['outgate'], {}), '(outgate)\n', (4573, 4582), True, 'import megengine.functional as F\n'), ((722, 749), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (731, 749), False, 'import math\n'), ((798, 827), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (813, 827), True, 'import megengine.module as M\n'), ((2204, 2255), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (2211, 2255), True, 'import megengine.functional as F\n'), ((3392, 3413), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (3399, 3413), True, 'import megengine.functional as F\n'), ((3449, 3470), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (3456, 3470), True, 'import megengine.functional as F\n'), ((4102, 4129), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4111, 4129), False, 'import math\n'), ((4178, 4207), 'megengine.module.init.uniform_', 'M.init.uniform_', (['w', '(-std)', 'std'], {}), '(w, -std, std)\n', (4193, 4207), True, 'import megengine.module as M\n'), ((4597, 
4618), 'megengine.functional.mul', 'F.mul', (['cx', 'forgetgate'], {}), '(cx, forgetgate)\n', (4602, 4618), True, 'import megengine.functional as F\n'), ((4621, 4644), 'megengine.functional.mul', 'F.mul', (['ingate', 'cellgate'], {}), '(ingate, cellgate)\n', (4626, 4644), True, 'import megengine.functional as F\n'), ((4674, 4684), 'megengine.functional.tanh', 'F.tanh', (['cy'], {}), '(cy)\n', (4680, 4684), True, 'import megengine.functional as F\n'), ((5661, 5712), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5668, 5712), True, 'import megengine.functional as F\n'), ((5730, 5781), 'megengine.functional.zeros', 'F.zeros', (['(self.num_layers, batch, self.hidden_size)'], {}), '((self.num_layers, batch, self.hidden_size))\n', (5737, 5781), True, 'import megengine.functional as F\n'), ((6974, 6995), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(1)'}), '(outs, axis=1)\n', (6981, 6995), True, 'import megengine.functional as F\n'), ((7031, 7052), 'megengine.functional.stack', 'F.stack', (['outs'], {'axis': '(0)'}), '(outs, axis=0)\n', (7038, 7052), True, 'import megengine.functional as F\n'), ((3230, 3263), 'megengine.functional.dropout', 'F.dropout', (['hidden_l', 'self.dropout'], {}), '(hidden_l, self.dropout)\n', (3239, 3263), True, 'import megengine.functional as F\n'), ((6721, 6757), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[0]', 'self.dropout'], {}), '(hidden_l[0], self.dropout)\n', (6730, 6757), True, 'import megengine.functional as F\n'), ((6783, 6819), 'megengine.functional.dropout', 'F.dropout', (['hidden_l[1]', 'self.dropout'], {}), '(hidden_l[1], self.dropout)\n', (6792, 6819), True, 'import megengine.functional as F\n')]
|
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
    """Two-layer conv head that regresses a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = M.ReLU()

    def forward(self, x):
        # conv -> relu -> conv, producing a 2-channel map.
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class SepConvGRU(M.Module):
    """Convolutional GRU with separable (1x5 then 5x1) gate convolutions.

    Applies one GRU update with horizontal (1, 5) kernels, then a second
    update with vertical (5, 1) kernels, on concatenated [h, x] features.
    """
    def __init__(self, hidden_dim=128, input_dim=192 + 128):
        super(SepConvGRU, self).__init__()
        # Horizontal pass: z (update), r (reset), q (candidate) gates.
        self.convz1 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
        )
        self.convr1 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
        )
        self.convq1 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
        )
        # Vertical pass: same three gates with (5, 1) kernels.
        self.convz2 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
        )
        self.convr2 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
        )
        self.convq2 = M.Conv2d(
            hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
        )
    def forward(self, h, x):
        """Return the hidden state after horizontal + vertical GRU updates."""
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))
        r = F.sigmoid(self.convr1(hx))
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))
        # GRU blend: h' = (1 - z) * h + z * q
        h = (1 - z) * h + z * q
        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q
        return h
class BasicMotionEncoder(M.Module):
    """Encode correlation volume and current flow into motion features.

    The output keeps 128 channels: 126 learned features concatenated with
    the raw 2-channel flow.
    """

    def __init__(self, cor_planes):
        super(BasicMotionEncoder, self).__init__()
        # Correlation branch.
        self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = M.Conv2d(256, 192, 3, padding=1)
        # Flow branch.
        self.convf1 = M.Conv2d(2, 128, 7, padding=3)
        self.convf2 = M.Conv2d(128, 64, 3, padding=1)
        # Fusion conv over the concatenated branches.
        self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        corr_feat = F.relu(self.convc2(corr_feat))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(F.concat([corr_feat, flow_feat], axis=1)))
        # Append the raw flow so downstream layers see it directly.
        return F.concat([fused, flow], axis=1)
class BasicUpdateBlock(M.Module):
    """RAFT-style update block: motion encoder + SepConvGRU + flow head.

    Produces the updated hidden state, an upsampling mask, and a flow
    residual for one refinement iteration.
    """
    def __init__(self, hidden_dim, cor_planes, mask_size=8):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder(cor_planes)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        # Predicts mask_size^2 * 9 weights per pixel for convex upsampling.
        self.mask = M.Sequential(
            M.Conv2d(128, 256, 3, padding=1),
            M.ReLU(),
            M.Conv2d(256, mask_size**2 * 9, 1, padding=0),
        )
    def forward(self, net, inp, corr, flow, upsample=True):
        """Run one update step; returns (net, mask, delta_flow).

        NOTE(review): `upsample` is accepted but unused in this body.
        """
        motion_features = self.encoder(flow, corr)
        inp = F.concat([inp, motion_features], axis=1)
        net = self.gru(net, inp)
        delta_flow = self.flow_head(net)
        # scale mask to balance gradients
        mask = 0.25 * self.mask(net)
        return net, mask, delta_flow
|
[
"megengine.functional.concat",
"megengine.module.ReLU",
"megengine.module.Conv2d"
] |
[((207, 252), 'megengine.module.Conv2d', 'M.Conv2d', (['input_dim', 'hidden_dim', '(3)'], {'padding': '(1)'}), '(input_dim, hidden_dim, 3, padding=1)\n', (215, 252), True, 'import megengine.module as M\n'), ((274, 311), 'megengine.module.Conv2d', 'M.Conv2d', (['hidden_dim', '(2)', '(3)'], {'padding': '(1)'}), '(hidden_dim, 2, 3, padding=1)\n', (282, 311), True, 'import megengine.module as M\n'), ((332, 340), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (338, 340), True, 'import megengine.module as M\n'), ((576, 644), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (584, 644), True, 'import megengine.module as M\n'), ((689, 757), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (697, 757), True, 'import megengine.module as M\n'), ((802, 870), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(1, 5)'], {'padding': '(0, 2)'}), '(hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2))\n', (810, 870), True, 'import megengine.module as M\n'), ((916, 984), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (924, 984), True, 'import megengine.module as M\n'), ((1029, 1097), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1037, 1097), True, 'import megengine.module as M\n'), ((1142, 1210), 'megengine.module.Conv2d', 'M.Conv2d', (['(hidden_dim + input_dim)', 'hidden_dim', '(5, 1)'], {'padding': '(2, 0)'}), '(hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0))\n', (1150, 1210), True, 'import 
megengine.module as M\n'), ((1297, 1321), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1305, 1321), True, 'import megengine.functional as F\n'), ((1527, 1551), 'megengine.functional.concat', 'F.concat', (['[h, x]'], {'axis': '(1)'}), '([h, x], axis=1)\n', (1535, 1551), True, 'import megengine.functional as F\n'), ((1890, 1929), 'megengine.module.Conv2d', 'M.Conv2d', (['cor_planes', '(256)', '(1)'], {'padding': '(0)'}), '(cor_planes, 256, 1, padding=0)\n', (1898, 1929), True, 'import megengine.module as M\n'), ((1952, 1984), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(192)', '(3)'], {'padding': '(1)'}), '(256, 192, 3, padding=1)\n', (1960, 1984), True, 'import megengine.module as M\n'), ((2007, 2037), 'megengine.module.Conv2d', 'M.Conv2d', (['(2)', '(128)', '(7)'], {'padding': '(3)'}), '(2, 128, 7, padding=3)\n', (2015, 2037), True, 'import megengine.module as M\n'), ((2060, 2091), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(64)', '(3)'], {'padding': '(1)'}), '(128, 64, 3, padding=1)\n', (2068, 2091), True, 'import megengine.module as M\n'), ((2112, 2153), 'megengine.module.Conv2d', 'M.Conv2d', (['(64 + 192)', '(128 - 2)', '(3)'], {'padding': '(1)'}), '(64 + 192, 128 - 2, 3, padding=1)\n', (2120, 2153), True, 'import megengine.module as M\n'), ((2367, 2395), 'megengine.functional.concat', 'F.concat', (['[cor, flo]'], {'axis': '(1)'}), '([cor, flo], axis=1)\n', (2375, 2395), True, 'import megengine.functional as F\n'), ((2452, 2481), 'megengine.functional.concat', 'F.concat', (['[out, flow]'], {'axis': '(1)'}), '([out, flow], axis=1)\n', (2460, 2481), True, 'import megengine.functional as F\n'), ((3123, 3163), 'megengine.functional.concat', 'F.concat', (['[inp, motion_features]'], {'axis': '(1)'}), '([inp, motion_features], axis=1)\n', (3131, 3163), True, 'import megengine.functional as F\n'), ((2872, 2904), 'megengine.module.Conv2d', 'M.Conv2d', (['(128)', '(256)', '(3)'], {'padding': '(1)'}), 
'(128, 256, 3, padding=1)\n', (2880, 2904), True, 'import megengine.module as M\n'), ((2918, 2926), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2924, 2926), True, 'import megengine.module as M\n'), ((2940, 2987), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(mask_size ** 2 * 9)', '(1)'], {'padding': '(0)'}), '(256, mask_size ** 2 * 9, 1, padding=0)\n', (2948, 2987), True, 'import megengine.module as M\n'), ((1431, 1459), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1439, 1459), True, 'import megengine.functional as F\n'), ((1661, 1689), 'megengine.functional.concat', 'F.concat', (['[r * h, x]'], {'axis': '(1)'}), '([r * h, x], axis=1)\n', (1669, 1689), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
from dataset import SIDDValData
from model import UNetD
import megengine.data as data
from utils import batch_PSNR
from tqdm import tqdm
import argparse
import pickle
import megengine
def test(args):
    """Evaluate a UNetD checkpoint on the SIDD validation set and print PSNR.

    Args:
        args: parsed CLI namespace with `data` (dataset root) and
            `checkpoint` (pickled state dict path).
    """
    valid_dataset = SIDDValData(args.data)
    # Deterministic one-by-one iteration over the validation images.
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=1, drop_last=False
    )
    valid_dataloader = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        num_workers=8,
    )
    model = UNetD(3)
    # Checkpoint is a pickle holding {"state_dict": ...}.
    with open(args.checkpoint, "rb") as f:
        state = pickle.load(f)
    model.load_state_dict(state["state_dict"])
    model.eval()
    def valid_step(image, label):
        # The network predicts the noise; denoised output = input - prediction.
        pred = model(image)
        pred = image - pred
        psnr_it = batch_PSNR(pred, label)
        return psnr_it
    def valid(func, data_queue):
        # Average per-image PSNR over the whole queue.
        # NOTE(review): raises NameError on an empty queue (`step` unbound).
        psnr_v = 0.
        for step, (image, label) in tqdm(enumerate(data_queue)):
            image = megengine.tensor(image)
            label = megengine.tensor(label)
            psnr_it = func(image, label)
            psnr_v += psnr_it
        psnr_v /= step + 1
        return psnr_v
    psnr_v = valid(valid_step, valid_dataloader)
    print("PSNR: {:.3f}".format(psnr_v.item()) )
# CLI entry point: parse dataset/checkpoint paths and run evaluation.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="MegEngine NBNet")
    parser.add_argument("-d", "--data", default="/data/sidd", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument("-c", "--checkpoint", help="path to checkpoint")
    args = parser.parse_args()
    test(args)
# vim: ts=4 sw=4 sts=4 expandtab
|
[
"megengine.data.DataLoader",
"megengine.tensor",
"megengine.data.SequentialSampler"
] |
[((245, 267), 'dataset.SIDDValData', 'SIDDValData', (['args.data'], {}), '(args.data)\n', (256, 267), False, 'from dataset import SIDDValData\n'), ((288, 356), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(1)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=1, drop_last=False)\n', (310, 356), True, 'import megengine.data as data\n'), ((394, 462), 'megengine.data.DataLoader', 'data.DataLoader', (['valid_dataset'], {'sampler': 'valid_sampler', 'num_workers': '(8)'}), '(valid_dataset, sampler=valid_sampler, num_workers=8)\n', (409, 462), True, 'import megengine.data as data\n'), ((506, 514), 'model.UNetD', 'UNetD', (['(3)'], {}), '(3)\n', (511, 514), False, 'from model import UNetD\n'), ((1276, 1330), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine NBNet"""'}), "(description='MegEngine NBNet')\n", (1299, 1330), False, 'import argparse\n'), ((574, 588), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (585, 588), False, 'import pickle\n'), ((762, 785), 'utils.batch_PSNR', 'batch_PSNR', (['pred', 'label'], {}), '(pred, label)\n', (772, 785), False, 'from utils import batch_PSNR\n'), ((948, 971), 'megengine.tensor', 'megengine.tensor', (['image'], {}), '(image)\n', (964, 971), False, 'import megengine\n'), ((992, 1015), 'megengine.tensor', 'megengine.tensor', (['label'], {}), '(label)\n', (1008, 1015), False, 'import megengine\n')]
|
import os
import numpy as np
import collections
import megengine.module as M
import megengine.functional as F
import megengine as mge
from megengine.data.dataset import Dataset
from megengine.data import DataLoader
import hparams as hp
from megengine.data import Collator
class AsrDataset(Dataset):
    """ASR dataset backed by a `<data_set>.txt` metadata file.

    Each metadata line is `mel_path|frames|token_ids_str|speaker`; items
    yield the token sequence plus the mel spectrogram and position indices.
    """
    def __init__(self, data_set="train"):
        """
        Args:
            data_set (string): metadata split to load ("train", etc.);
                resolved against `hp.dataset_root`.
        """
        self.metas = self.load_metas(hp.dataset_root, data_set)
    def load_metas(self, root, data_set):  # parse `<data_set>.txt` into meta dicts
        metas = []
        with open(os.path.join(root, f"{data_set}.txt")) as f:
            for line in f.readlines():
                # Fields are '|'-separated: path, frame count, tokens, speaker.
                info = line.split("|")
                metas.append(
                    {
                        "mel_path": os.path.join(root, info[0]),
                        "frames": info[1],
                        "token_ids_str": info[2],
                        "speaker": info[3],
                    }
                )
        return metas
    def __len__(self):
        return len(self.metas)
    def __getitem__(self, idx):
        """Return one sample dict with text/mel tensors and 1-based positions."""
        meta = self.metas[idx]
        token_ids = [int(i) for i in meta["token_ids_str"].split(" ")]
        text = np.array(token_ids, dtype=np.int32)
        mel = np.load(meta["mel_path"])
        # Teacher forcing: input is tokens[:-1], target is tokens[1:].
        text_input = text[:-1]
        text_output = text[1:]
        text_length = text_input.shape[0]
        # 1-based position indices for text and mel frames.
        pos_text = np.arange(1, text_length + 1)
        pos_mel = np.arange(1, mel.shape[0] + 1)
        return {
            "text": text,
            "text_input": text_input,
            "text_output": text_output,
            "text_length": text_length,
            "mel": mel,
            "pos_mel": pos_mel,
            "pos_text": pos_text,
        }
class AsrCollator(Collator):
    """Collate ASR samples into length-sorted, padded MegEngine tensors.

    Samples are sorted by mel-spectrogram length (descending), padded to
    the longest item in the batch, and converted to `mge.Tensor`.

    Args:
        pad_value (float): padding value. Default: 0.0
    """
    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        self.pad_value = pad_value

    @staticmethod
    def _sort_by_ref(values, ref):
        """Reorder `values` by descending `ref` (stable; compares keys only)."""
        return [v for v, _ in sorted(zip(values, ref), key=lambda p: p[1], reverse=True)]

    def apply(self, batch):
        """Build the batched (text_input, text_output, mel, pos_text, pos_mel,
        text_length, mel_length) tensor tuple from a list of sample dicts."""
        # BUGFIX: `collections.Mapping` was removed in Python 3.10; use the
        # `collections.abc` ABC instead (imported locally to keep this block
        # self-contained).
        import collections.abc
        # Puts each data field into a tensor with outer dimension batch size
        if isinstance(batch[0], collections.abc.Mapping):
            mel_length = [d["mel"].shape[0] for d in batch]
            # Sort every field by descending mel length so padding waste is
            # minimized and lengths stay aligned across fields.
            text_input = self._sort_by_ref([d["text_input"] for d in batch], mel_length)
            text_output = self._sort_by_ref([d["text_output"] for d in batch], mel_length)
            text_length = self._sort_by_ref([d["text_length"] for d in batch], mel_length)
            mel = self._sort_by_ref([d["mel"] for d in batch], mel_length)
            pos_text = self._sort_by_ref([d["pos_text"] for d in batch], mel_length)
            pos_mel = self._sort_by_ref([d["pos_mel"] for d in batch], mel_length)
            mel_length = sorted(mel_length, reverse=True)
            # PAD sequences with largest length of the batch
            text_input = _prepare_data(text_input).astype(np.int32)
            text_output = _prepare_data(text_output).astype(np.int32)
            mel = _pad_mel(mel)
            pos_mel = _prepare_data(pos_mel).astype(np.int32)
            pos_text = _prepare_data(pos_text).astype(np.int32)
            return (
                mge.Tensor(text_input),
                mge.Tensor(text_output),
                mge.Tensor(mel),
                mge.Tensor(pos_text),
                mge.Tensor(pos_mel),
                mge.Tensor(text_length),
                mge.Tensor(mel_length),
            )
        raise TypeError(
            (
                "batch must contain tensors, numbers, dicts or lists; found {}".format(
                    type(batch[0])
                )
            )
        )
def collate_fn_transformer_test(batch):
    """Collate ONE test sample (a dict, not a list of dicts) into tensors.

    Returns megengine tensors:
    (text_input, text_output, mel, pos_text, pos_mel, text_length, mel_length).

    The unreachable ``raise TypeError`` that followed the return statement in
    the original has been removed (dead code); everything else is unchanged.
    """
    text = [batch["text"]]  # for d in batch]
    text_input = batch["text_input"]
    text_output = batch["text_output"]
    text_length = batch["text_length"]
    mel = [batch["mel"]]
    mel_length = [batch["mel"].shape[1]]
    pos_mel = batch["pos_mel"]
    pos_text = batch["pos_text"]
    # NOTE(review): ``mel_length`` holds a single element, so these sorts are
    # effectively no-ops for the wrapped lists, and for the un-wrapped arrays
    # (text_input, text_output, pos_*) ``zip`` stops after one element.  Also
    # note mel length is read from ``shape[1]`` here while AsrCollator uses
    # ``shape[0]`` — confirm both are intended.
    text = [
        i for i, _ in sorted(zip(text, mel_length), key=lambda x: x[1], reverse=True)
    ]
    text_input = [
        i
        for i, _ in sorted(
            zip(text_input, mel_length), key=lambda x: x[1], reverse=True
        )
    ]
    text_output = [
        i
        for i, _ in sorted(
            zip(text_output, mel_length), key=lambda x: x[1], reverse=True
        )
    ]
    text_length = [
        i
        for i, _ in sorted(
            zip(text_length, mel_length), key=lambda x: x[1], reverse=True
        )
    ]
    mel = [i for i, _ in sorted(zip(mel, mel_length), key=lambda x: x[1], reverse=True)]
    pos_text = [
        i
        for i, _ in sorted(zip(pos_text, mel_length), key=lambda x: x[1], reverse=True)
    ]
    pos_mel = [
        i for i, _ in sorted(zip(pos_mel, mel_length), key=lambda x: x[1], reverse=True)
    ]
    mel_length = sorted(mel_length, reverse=True)
    # PAD sequences with largest length of the batch
    text_input = _prepare_data(text_input).astype(np.int32)
    text_output = _prepare_data(text_output).astype(np.int32)
    mel = _pad_mel(mel[0])
    pos_mel = _prepare_data(pos_mel).astype(np.int32)
    pos_text = _prepare_data(pos_text).astype(np.int32)
    return (
        mge.Tensor(text_input),
        mge.Tensor(text_output),
        mge.Tensor(mel),
        mge.Tensor(pos_text),
        mge.Tensor(pos_mel),
        mge.Tensor(text_length),
        mge.Tensor(mel_length),
    )
############################ Utils ###################################
def _pad_data(x, length):
_pad = 0
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=_pad)
def _prepare_data(inputs):
    """Stack 1-D sequences into a 2-D array, zero-padding each to the longest."""
    longest = max(len(seq) for seq in inputs)
    padded = [_pad_data(seq, longest) for seq in inputs]
    return np.stack(padded)
def _pad_mel(inputs):
_pad = 0
def _pad_one(x, max_len):
mel_len = x.shape[0]
return np.pad(
x, [[0, max_len - mel_len], [0, 0]], mode="constant", constant_values=_pad
)
max_len = max((x.shape[0] for x in inputs))
return np.stack([_pad_one(x, max_len) for x in inputs])
|
[
"megengine.Tensor"
] |
[((7013, 7087), 'numpy.pad', 'np.pad', (['x', '(0, length - x.shape[0])'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, (0, length - x.shape[0]), mode='constant', constant_values=_pad)\n", (7019, 7087), True, 'import numpy as np\n'), ((1250, 1285), 'numpy.array', 'np.array', (['token_ids'], {'dtype': 'np.int32'}), '(token_ids, dtype=np.int32)\n', (1258, 1285), True, 'import numpy as np\n'), ((1300, 1325), 'numpy.load', 'np.load', (["meta['mel_path']"], {}), "(meta['mel_path'])\n", (1307, 1325), True, 'import numpy as np\n'), ((1449, 1478), 'numpy.arange', 'np.arange', (['(1)', '(text_length + 1)'], {}), '(1, text_length + 1)\n', (1458, 1478), True, 'import numpy as np\n'), ((1497, 1527), 'numpy.arange', 'np.arange', (['(1)', '(mel.shape[0] + 1)'], {}), '(1, mel.shape[0] + 1)\n', (1506, 1527), True, 'import numpy as np\n'), ((6499, 6521), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (6509, 6521), True, 'import megengine as mge\n'), ((6531, 6554), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (6541, 6554), True, 'import megengine as mge\n'), ((6564, 6579), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (6574, 6579), True, 'import megengine as mge\n'), ((6589, 6609), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (6599, 6609), True, 'import megengine as mge\n'), ((6619, 6638), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (6629, 6638), True, 'import megengine as mge\n'), ((6648, 6671), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (6658, 6671), True, 'import megengine as mge\n'), ((6681, 6703), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (6691, 6703), True, 'import megengine as mge\n'), ((7334, 7420), 'numpy.pad', 'np.pad', (['x', '[[0, max_len - mel_len], [0, 0]]'], {'mode': '"""constant"""', 'constant_values': '_pad'}), "(x, [[0, max_len - mel_len], [0, 0]], mode='constant',\n 
constant_values=_pad)\n", (7340, 7420), True, 'import numpy as np\n'), ((607, 644), 'os.path.join', 'os.path.join', (['root', 'f"""{data_set}.txt"""'], {}), "(root, f'{data_set}.txt')\n", (619, 644), False, 'import os\n'), ((4312, 4334), 'megengine.Tensor', 'mge.Tensor', (['text_input'], {}), '(text_input)\n', (4322, 4334), True, 'import megengine as mge\n'), ((4352, 4375), 'megengine.Tensor', 'mge.Tensor', (['text_output'], {}), '(text_output)\n', (4362, 4375), True, 'import megengine as mge\n'), ((4393, 4408), 'megengine.Tensor', 'mge.Tensor', (['mel'], {}), '(mel)\n', (4403, 4408), True, 'import megengine as mge\n'), ((4426, 4446), 'megengine.Tensor', 'mge.Tensor', (['pos_text'], {}), '(pos_text)\n', (4436, 4446), True, 'import megengine as mge\n'), ((4464, 4483), 'megengine.Tensor', 'mge.Tensor', (['pos_mel'], {}), '(pos_mel)\n', (4474, 4483), True, 'import megengine as mge\n'), ((4501, 4524), 'megengine.Tensor', 'mge.Tensor', (['text_length'], {}), '(text_length)\n', (4511, 4524), True, 'import megengine as mge\n'), ((4542, 4564), 'megengine.Tensor', 'mge.Tensor', (['mel_length'], {}), '(mel_length)\n', (4552, 4564), True, 'import megengine as mge\n'), ((818, 845), 'os.path.join', 'os.path.join', (['root', 'info[0]'], {}), '(root, info[0])\n', (830, 845), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing
import os
import time
# pylint: disable=import-error
import model as resnet_model
import megengine
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
logging = megengine.logger.get_logger()
def main():
    """Entry point: parse CLI arguments, then spawn one ``worker`` per GPU.

    The master process (rank <= 0) also starts the distributed server that
    the workers connect to.
    """
    parser = argparse.ArgumentParser(description="MegEngine ImageNet Training")
    parser.add_argument("-d", "--data", metavar="DIR", help="path to imagenet dataset")
    parser.add_argument(
        "-a",
        "--arch",
        default="resnet50",
        help="model architecture (default: resnet50)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=None,
        type=int,
        help="number of GPUs per node (default: None, use all available GPUs)",
    )
    parser.add_argument(
        "--save",
        metavar="DIR",
        default="output",
        help="path to save checkpoint and log",
    )
    parser.add_argument(
        "--epochs",
        default=10,
        type=int,
        help="number of total epochs to run (default: 10)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        metavar="SIZE",
        default=64,
        type=int,
        help="batch size for single GPU (default: 64)",
    )
    parser.add_argument(
        "--lr",
        "--learning-rate",
        metavar="LR",
        default=0.025,
        type=float,
        help="learning rate for single GPU (default: 0.025)",
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
    )
    parser.add_argument(
        "--weight-decay", default=1e-4, type=float, help="weight decay (default: 1e-4)"
    )
    parser.add_argument("-j", "--workers", default=2, type=int)
    parser.add_argument(
        "-p",
        "--print-freq",
        default=20,
        type=int,
        metavar="N",
        help="print frequency (default: 20)",
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", default=23456, type=int)
    parser.add_argument("--world-size", default=1, type=int)
    parser.add_argument("--rank", default=0, type=int)
    parser.add_argument(
        "--enable-dtr",
        dest="enable_dtr",
        action="store_true",
        help="Enable DTR")
    args = parser.parse_args()
    # create server if is master
    if args.rank <= 0:
        server = dist.Server(port=args.dist_port)  # pylint: disable=unused-variable # noqa: F841
    # get device count
    # NOTE(review): counts are queried in a child process, presumably so that
    # device initialization does not happen in this parent process — confirm.
    with multiprocessing.Pool(1) as pool:
        ngpus_per_node, _ = pool.map(megengine.get_device_count, ["gpu", "cpu"])
    if args.ngpus:
        ngpus_per_node = args.ngpus
    # launch processes: one worker per local GPU, with globally unique ranks
    procs = []
    for local_rank in range(ngpus_per_node):
        p = multiprocessing.Process(
            target=worker,
            kwargs=dict(
                rank=args.rank * ngpus_per_node + local_rank,
                world_size=args.world_size * ngpus_per_node,
                ngpus_per_node=ngpus_per_node,
                args=args,
            ),
        )
        p.start()
        procs.append(p)
    # join processes
    for p in procs:
        p.join()
def worker(rank, world_size, ngpus_per_node, args):
    """Training loop for a single (possibly distributed) process.

    Args:
        rank: global rank of this process within the job.
        world_size: total number of training processes.
        ngpus_per_node: processes (GPUs) per node; used to pick the device.
        args: parsed command-line namespace produced by ``main``.
    """
    # pylint: disable=too-many-statements
    # enable DTR (dynamic tensor rematerialization) with a fixed memory budget
    if args.enable_dtr:
        from megengine.utils.dtr import DTR
        # NOTE(review): the DTR handle is bound but never used afterwards —
        # presumably enabling it has a global side effect; confirm.
        ds = DTR(memory_budget=5*1024**3)
    if rank == 0:
        # Only rank 0 creates the output directory and log file.
        os.makedirs(os.path.join(args.save, args.arch), exist_ok=True)
        megengine.logger.set_log_file(os.path.join(args.save, args.arch, "log.txt"))
    # init process group
    if world_size > 1:
        dist.init_process_group(
            master_ip=args.dist_addr,
            port=args.dist_port,
            world_size=world_size,
            rank=rank,
            device=rank % ngpus_per_node,
            backend="nccl",
        )
        logging.info(
            "init process group rank %d / %d", dist.get_rank(), dist.get_world_size()
        )
    # build dataset
    train_dataloader, valid_dataloader = build_dataset(args)
    train_queue = iter(train_dataloader)  # infinite
    # 1280000 approximates the ImageNet-1k training set size.
    steps_per_epoch = 1280000 // (world_size * args.batch_size)
    # build model by architecture name (e.g. "resnet50")
    model = resnet_model.__dict__[args.arch]()
    # Sync parameters across processes before training starts
    if world_size > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)
    # Autodiff gradient manager; gradients are all-reduced across workers
    gm = autodiff.GradManager().attach(
        model.parameters(),
        callbacks=dist.make_allreduce_cb("SUM") if world_size > 1 else None,
    )
    # Optimizer
    opt = optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * world_size,  # scale weight decay in "SUM" mode
    )
    # train and valid func
    def train_step(image, label):
        # One forward/backward/update step; returns loss and top-1/top-5 acc.
        with gm:
            logits = model(image)
            loss = F.nn.cross_entropy(logits, label)
            acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
            gm.backward(loss)
            opt.step().clear_grad()
        return loss, acc1, acc5
    def valid_step(image, label):
        # Forward-only evaluation step.
        logits = model(image)
        loss = F.nn.cross_entropy(logits, label)
        acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))
        # calculate mean values across workers
        if world_size > 1:
            loss = F.distributed.all_reduce_sum(loss) / world_size
            acc1 = F.distributed.all_reduce_sum(acc1) / world_size
            acc5 = F.distributed.all_reduce_sum(acc5) / world_size
        return loss, acc1, acc5
    # multi-step learning rate scheduler with warmup
    def adjust_learning_rate(step):
        # 10x decay at epochs 30/60/80; linear warmup over the first 5 epochs.
        lr = args.lr * 0.1 ** bisect.bisect_right(
            [30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch], step
        )
        if step < 5 * steps_per_epoch:  # warmup
            lr = args.lr * (step / (5 * steps_per_epoch))
        for param_group in opt.param_groups:
            param_group["lr"] = lr
        return lr
    # start training
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    clck = AverageMeter("Time")
    for step in range(0, args.epochs * steps_per_epoch):
        lr = adjust_learning_rate(step)
        t = time.time()
        image, label = next(train_queue)
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        loss, acc1, acc5 = train_step(image, label)
        objs.update(loss.item())
        top1.update(100 * acc1.item())
        top5.update(100 * acc5.item())
        clck.update(time.time() - t)
        # Only rank 0 logs, to avoid duplicated output in distributed runs.
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Epoch %d Step %d, LR %.4f, %s %s %s %s",
                step // steps_per_epoch,
                step,
                lr,
                objs,
                top1,
                top5,
                clck,
            )
            objs.reset()
            top1.reset()
            top5.reset()
            clck.reset()
        # End of an epoch: evaluate and write a checkpoint.
        if (step + 1) % steps_per_epoch == 0:
            model.eval()
            _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)
            model.train()
            logging.info(
                "Epoch %d Test Acc@1 %.3f, Acc@5 %.3f",
                (step + 1) // steps_per_epoch,
                valid_acc1,
                valid_acc5,
            )
            megengine.save(
                {
                    "epoch": (step + 1) // steps_per_epoch,
                    "state_dict": model.state_dict(),
                },
                os.path.join(args.save, args.arch, "checkpoint.pkl"),
            )
def valid(func, data_queue, args):
    """Run ``func`` over every batch in ``data_queue``.

    Returns the sample-weighted averages ``(loss, acc1, acc5)``.
    """
    loss_meter = AverageMeter("Loss")
    acc1_meter = AverageMeter("Acc@1")
    acc5_meter = AverageMeter("Acc@5")
    time_meter = AverageMeter("Time")
    tick = time.time()
    for step, (image, label) in enumerate(data_queue):
        image = megengine.tensor(image, dtype="float32")
        label = megengine.tensor(label, dtype="int32")
        n = image.shape[0]
        loss, acc1, acc5 = func(image, label)
        # Weight each update by the batch size so averages are per-sample.
        loss_meter.update(loss.item(), n)
        acc1_meter.update(100 * acc1.item(), n)
        acc5_meter.update(100 * acc5.item(), n)
        time_meter.update(time.time() - tick, n)
        tick = time.time()
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Test step %d, %s %s %s %s",
                step,
                loss_meter,
                acc1_meter,
                acc5_meter,
                time_meter,
            )
    return loss_meter.avg, acc1_meter.avg, acc5_meter.avg
def build_dataset(args):
    """Build the ImageNet train/valid dataloaders described by ``args``.

    Returns:
        (train_dataloader, valid_dataloader); the train loader samples
        infinitely, the valid loader walks the set once sequentially.
    """
    train_dataset = data.dataset.ImageNet(args.data, train=True)
    train_sampler = data.Infinite(
        data.RandomSampler(train_dataset, batch_size=args.batch_size, drop_last=True)
    )
    if args.arch in ("resnet18", "resnet34"):
        # Baseline Augmentation for small models
        train_transform = T.Compose(
            [
                T.RandomResizedCrop(224),
                T.RandomHorizontalFlip(),
                T.Normalize(
                    mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
                ),  # BGR
                T.ToMode("CHW"),
            ]
        )
    else:
        # Facebook Augmentation for large models (adds color jitter)
        train_transform = T.Compose(
            [
                T.RandomResizedCrop(224),
                T.RandomHorizontalFlip(),
                T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                T.Normalize(
                    mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
                ),  # BGR
                T.ToMode("CHW"),
            ]
        )
    train_dataloader = data.DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=train_transform,
        num_workers=args.workers,
    )
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=100, drop_last=False
    )
    valid_dataloader = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        transform=T.Compose(
            [
                T.Resize(256),
                T.CenterCrop(224),
                T.Normalize(
                    mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
                ),  # BGR
                T.ToMode("CHW"),
            ]
        ),
        num_workers=args.workers,
    )
    return train_dataloader, valid_dataloader
class AverageMeter:
    """Tracks the latest value and the running (weighted) average of a metric."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 0.123 (0.456)" with the configured format spec.
        template = "{name} {val%s} ({avg%s})" % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
if __name__ == "__main__":
main()
|
[
"megengine.functional.topk_accuracy",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.distributed.init_process_group",
"megengine.utils.dtr.DTR",
"megengine.distributed.get_rank",
"megengine.functional.distributed.all_reduce_sum",
"megengine.data.transform.CenterCrop",
"megengine.distributed.get_world_size",
"megengine.tensor",
"megengine.data.transform.Resize",
"megengine.functional.nn.cross_entropy",
"megengine.data.SequentialSampler",
"megengine.distributed.Server",
"megengine.data.transform.Normalize",
"megengine.logger.get_logger",
"megengine.data.transform.ToMode",
"megengine.distributed.make_allreduce_cb",
"megengine.data.RandomSampler",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.ColorJitter",
"megengine.data.dataset.ImageNet",
"megengine.autodiff.GradManager"
] |
[((753, 782), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (780, 782), False, 'import megengine\n'), ((809, 875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ImageNet Training"""'}), "(description='MegEngine ImageNet Training')\n", (832, 875), False, 'import argparse\n'), ((8402, 8413), 'time.time', 'time.time', ([], {}), '()\n', (8411, 8413), False, 'import time\n'), ((9079, 9123), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(True)'}), '(args.data, train=True)\n', (9100, 9123), True, 'import megengine.data as data\n'), ((10255, 10300), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (10276, 10300), True, 'import megengine.data as data\n'), ((10321, 10391), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (10343, 10391), True, 'import megengine.data as data\n'), ((2923, 2955), 'megengine.distributed.Server', 'dist.Server', ([], {'port': 'args.dist_port'}), '(port=args.dist_port)\n', (2934, 2955), True, 'import megengine.distributed as dist\n'), ((3038, 3061), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (3058, 3061), False, 'import multiprocessing\n'), ((3897, 3929), 'megengine.utils.dtr.DTR', 'DTR', ([], {'memory_budget': '(5 * 1024 ** 3)'}), '(memory_budget=5 * 1024 ** 3)\n', (3900, 3929), False, 'from megengine.utils.dtr import DTR\n'), ((4156, 4315), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'world_size', 'rank': 'rank', 'device': '(rank % ngpus_per_node)', 'backend': '"""nccl"""'}), "(master_ip=args.dist_addr, port=args.dist_port,\n world_size=world_size, rank=rank, device=rank % ngpus_per_node, 
backend\n ='nccl')\n", (4179, 4315), True, 'import megengine.distributed as dist\n'), ((5686, 5719), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5704, 5719), True, 'import megengine.functional as F\n'), ((5741, 5784), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5756, 5784), True, 'import megengine.functional as F\n'), ((6781, 6792), 'time.time', 'time.time', ([], {}), '()\n', (6790, 6792), False, 'import time\n'), ((6851, 6891), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (6867, 6891), False, 'import megengine\n'), ((6908, 6946), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (6924, 6946), False, 'import megengine\n'), ((8485, 8525), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (8501, 8525), False, 'import megengine\n'), ((8542, 8580), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (8558, 8580), False, 'import megengine\n'), ((8829, 8840), 'time.time', 'time.time', ([], {}), '()\n', (8838, 8840), False, 'import time\n'), ((9167, 9244), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset'], {'batch_size': 'args.batch_size', 'drop_last': '(True)'}), '(train_dataset, batch_size=args.batch_size, drop_last=True)\n', (9185, 9244), True, 'import megengine.data as data\n'), ((3964, 3998), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (3976, 3998), False, 'import os\n'), ((4053, 4098), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""log.txt"""'], {}), "(args.save, args.arch, 'log.txt')\n", (4065, 4098), False, 'import os\n'), ((4459, 4474), 'megengine.distributed.get_rank', 
'dist.get_rank', ([], {}), '()\n', (4472, 4474), True, 'import megengine.distributed as dist\n'), ((4476, 4497), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4495, 4497), True, 'import megengine.distributed as dist\n'), ((4918, 4940), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (4938, 4940), True, 'import megengine.autodiff as autodiff\n'), ((5405, 5438), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (5423, 5438), True, 'import megengine.functional as F\n'), ((5464, 5507), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['logits', 'label'], {'topk': '(1, 5)'}), '(logits, label, topk=(1, 5))\n', (5479, 5507), True, 'import megengine.functional as F\n'), ((4995, 5024), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (5017, 5024), True, 'import megengine.distributed as dist\n'), ((5863, 5897), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['loss'], {}), '(loss)\n', (5891, 5897), True, 'import megengine.functional as F\n'), ((5930, 5964), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (5958, 5964), True, 'import megengine.functional as F\n'), ((5997, 6031), 'megengine.functional.distributed.all_reduce_sum', 'F.distributed.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6025, 6031), True, 'import megengine.functional as F\n'), ((6197, 6294), 'bisect.bisect_right', 'bisect.bisect_right', (['[30 * steps_per_epoch, 60 * steps_per_epoch, 80 * steps_per_epoch]', 'step'], {}), '([30 * steps_per_epoch, 60 * steps_per_epoch, 80 *\n steps_per_epoch], step)\n', (6216, 6294), False, 'import bisect\n'), ((7132, 7143), 'time.time', 'time.time', ([], {}), '()\n', (7141, 7143), False, 'import time\n'), ((7193, 7208), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7206, 
7208), True, 'import megengine.distributed as dist\n'), ((8158, 8210), 'os.path.join', 'os.path.join', (['args.save', 'args.arch', '"""checkpoint.pkl"""'], {}), "(args.save, args.arch, 'checkpoint.pkl')\n", (8170, 8210), False, 'import os\n'), ((8797, 8808), 'time.time', 'time.time', ([], {}), '()\n', (8806, 8808), False, 'import time\n'), ((8885, 8900), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8898, 8900), True, 'import megengine.distributed as dist\n'), ((10559, 10572), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (10567, 10572), True, 'import megengine.data.transform as T\n'), ((10590, 10607), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (10602, 10607), True, 'import megengine.data.transform as T\n'), ((10625, 10697), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10636, 10697), True, 'import megengine.data.transform as T\n'), ((10763, 10778), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10771, 10778), True, 'import megengine.data.transform as T\n'), ((9446, 9470), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9465, 9470), True, 'import megengine.data.transform as T\n'), ((9488, 9512), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9510, 9512), True, 'import megengine.data.transform as T\n'), ((9530, 9602), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (9541, 9602), True, 'import megengine.data.transform as T\n'), ((9668, 9683), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (9676, 9683), True, 
'import megengine.data.transform as T\n'), ((9854, 9878), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['(224)'], {}), '(224)\n', (9873, 9878), True, 'import megengine.data.transform as T\n'), ((9896, 9920), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (9918, 9920), True, 'import megengine.data.transform as T\n'), ((9938, 9997), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (9951, 9997), True, 'import megengine.data.transform as T\n'), ((10015, 10087), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '[103.53, 116.28, 123.675]', 'std': '[57.375, 57.12, 58.395]'}), '(mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395])\n', (10026, 10087), True, 'import megengine.data.transform as T\n'), ((10153, 10168), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (10161, 10168), True, 'import megengine.data.transform as T\n')]
|
# -*- coding: utf-8 -*-
import megengine as mge
import megengine.random as rand
import megengine.functional as F
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr
import pdb
def fpn_roi_target(rpn_rois, im_info, gt_boxes, fg_threshold = config.fg_threshold, top_k=1):
    """Match RPN proposals to ground-truth boxes and build R-CNN training targets.

    For each image the proposals are matched to ground-truth boxes by IoU
    (with a separate "ignore" overlap), foreground/background samples are
    drawn via Bernoulli sampling, and box-regression deltas are computed.

    Args:
        rpn_rois: proposals; column 0 is the batch index, columns 1-4 the box
            coordinates (established by the ``rpn_rois[:, 0]`` filter below).
        im_info: per-image meta; column 5 is read as the number of valid
            ground-truth boxes of the image.
        gt_boxes: per-image ground-truth boxes; columns 0-3 are coordinates,
            column 4 is the class label.
        fg_threshold: IoU above which a proposal is labelled foreground
            (default bound to ``config.fg_threshold`` at import time).
        top_k: number of best-matching ground-truth boxes kept per proposal.

    Returns:
        (rois, labels, bbox_targets), all detached from the autodiff graph.
    """
    return_rois, return_labels = [], []
    return_bbox_targets = []
    # get per image proposals and gt_boxes
    batch_per_gpu = im_info.shape[0]
    sampling = True
    # is_sample = True if top_k < 2 else False
    for bid in range(batch_per_gpu):
        # Valid ground-truth boxes for this image (count in im_info[bid, 5]).
        gt_boxes_perimg = gt_boxes[bid, :im_info[bid, 5].astype(np.int32), :]
        # Dummy row of ones appended so every proposal has something to match.
        dummy_gt = F.ones([1, gt_boxes_perimg.shape[1]])
        batch_inds = F.ones((gt_boxes_perimg.shape[0], 1)) * bid
        #if config.proposal_append_gt:
        gt_rois = F.concat([batch_inds, gt_boxes_perimg[:, :4]], axis=1)
        # Select the proposals belonging to this batch element.
        batch_rois_mask = F.equal(rpn_rois[:, 0], bid) > 0
        _, batch_rois_index = F.cond_take(batch_rois_mask, batch_rois_mask)
        # batch_roi_mask = rpn_rois[:, 0] == bid
        # batch_roi_inds = mask_to_inds(batch_roi_mask)
        # When sampling, ground-truth boxes are appended as extra proposals.
        all_rois= F.concat([rpn_rois[batch_rois_index], gt_rois], axis=0) if sampling \
            else rpn_rois[batch_rois_index]
        # all_rois = F.concat([rpn_rois.ai[batch_roi_inds], gt_rois], axis=0)
        gt_boxes_perimg = F.concat([gt_boxes_perimg, dummy_gt],axis=0)
        overlaps_normal, overlaps_ignore = box_overlap_ignore_opr(
                all_rois[:, 1:5], gt_boxes_perimg)
        # overlaps_normal, overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        # overlaps_ignore, overlaps_ignore_indices = F.argsort(overlaps_ignore, descending=True)
        # Sort overlaps per proposal (descending) so [:, :top_k] is the best.
        overlaps_normal_indices = F.argsort(overlaps_normal, descending=True)
        overlaps_normal = F.gather(overlaps_normal, 1, overlaps_normal_indices)
        # overlaps_normal = F.nn.indexing_one_hot(overlaps_normal, overlaps_normal_indices, 1)
        overlaps_ignore_indices = F.argsort(overlaps_ignore, descending = True)
        overlaps_ignore = F.gather(overlaps_ignore, 1, overlaps_ignore_indices)
        # overlaps_ignore = F.nn.indexing_one_hot(overlaps_ignore, overlaps_ignore_indices, 1)
        # gt max and indices, ignore max and indices
        max_overlaps_normal = overlaps_normal[:, :top_k].flatten()
        gt_assignment_normal = overlaps_normal_indices[:, :top_k].flatten()
        max_overlaps_ignore = overlaps_ignore[:, :top_k].flatten()
        gt_assignment_ignore = overlaps_ignore_indices[:, :top_k].flatten()
        # cons masks: prefer the ignore match only when the normal match is
        # below the fg threshold and the ignore overlap is larger.
        ignore_assign_mask = (max_overlaps_normal < fg_threshold).astype(np.float32) * (
            max_overlaps_ignore > max_overlaps_normal).astype(np.float32)
        max_overlaps = max_overlaps_normal * (1 - ignore_assign_mask).astype(np.float32) + \
            max_overlaps_ignore * ignore_assign_mask
        gt_assignment = gt_assignment_normal * (1- ignore_assign_mask) + \
            gt_assignment_ignore * ignore_assign_mask
        gt_assignment = gt_assignment.astype(np.int32)
        labels = gt_boxes_perimg[gt_assignment, 4]
        # Foreground: high overlap and not the ignore label.
        fg_mask = (max_overlaps >= fg_threshold).astype(np.float32) * (1 - F.equal(labels, config.ignore_label))
        bg_mask = (max_overlaps < config.bg_threshold_high).astype(np.float32) * (
            max_overlaps >= config.bg_threshold_low).astype(np.float32)
        fg_mask = fg_mask.reshape(-1, top_k)
        bg_mask = bg_mask.reshape(-1, top_k)
        pos_max = config.num_rois * config.fg_ratio
        # Randomly keep at most pos_max foreground / neg_max background rois.
        fg_inds_mask = _bernoulli_sample_masks(fg_mask[:, 0], pos_max, 1) if sampling else F.equal(fg_mask[:, 0], 0)
        neg_max = config.num_rois - fg_inds_mask.sum()
        bg_inds_mask = _bernoulli_sample_masks(bg_mask[:, 0], neg_max, 1) if sampling else F.equal(bg_mask[:, 0], 0)
        labels = labels * fg_mask.reshape(-1)
        keep_mask = fg_inds_mask + bg_inds_mask
        # Guard: if nothing was kept, force the mask non-empty.
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        # keep_inds = mask_to_inds(keep_mask)
        _, keep_inds = F.cond_take(keep_mask > 0, keep_mask)
        #keep_inds = keep_inds[:F.minimum(config.num_rois, keep_inds.shapeof()[0])]
        # labels
        labels = labels.reshape(-1, top_k)[keep_inds]
        gt_assignment = gt_assignment.reshape(-1, top_k)[keep_inds].reshape(-1).astype(np.int32)
        target_boxes = gt_boxes_perimg[gt_assignment, :4]
        # rois = all_rois.ai[keep_inds]
        rois = all_rois[keep_inds]
        # target_shape = (rois.shapeof()[0], top_k, rois.shapeof()[-1])
        n, c = rois.shape[0], rois.shape[1]
        # Repeat each kept roi top_k times to pair it with its top_k targets.
        target_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, top_k, c)).reshape(-1, c)
        # target_rois = F.add_axis(rois, 1).broadcast(target_shape).reshape(-1, rois.shapeof()[-1])
        bbox_targets = bbox_transform_opr(target_rois[:, 1:5], target_boxes[:, :4])
        if config.rcnn_bbox_normalize_targets:
            # Normalize regression targets with the configured mean/std.
            std_opr = mge.tensor(config.bbox_normalize_stds[None, :]).to(rois.device)
            mean_opr = mge.tensor(config.bbox_normalize_means[None, :]).to(rois.device)
            minus_opr = mean_opr / std_opr
            bbox_targets = bbox_targets / std_opr - minus_opr
        bbox_targets = bbox_targets.reshape(-1, top_k * 4)
        return_rois.append(rois)
        return_labels.append(labels)
        return_bbox_targets.append(bbox_targets)
    if config.batch_per_gpu == 1:
        # Single-image batch: the loop ran once, so reuse its locals directly.
        rois, labels, bbox_targets = rois.detach(), labels.detach(), bbox_targets.detach()
        return rois, labels, bbox_targets
        # return F.zero_grad(rois), F.zero_grad(labels), F.zero_grad(bbox_targets)
    else:
        return_rois = F.concat(return_rois, axis=0)
        return_labels = F.concat(return_labels, axis=0)
        return_bbox_targets = F.concat(return_bbox_targets, axis=0)
        return_rois = return_rois.detach()
        return_labels = return_labels.detach()
        return_bbox_targets = return_bbox_targets.detach()
        return return_rois, return_labels, return_bbox_targets
    # rois, labels, bbox_targets = return_rois.detach(), return_labels.detach(), return_bbox_targets.detach()
    # return rois, labels, bbox_targets
    # return F.zero_grad(return_rois), F.zero_grad(return_labels), F.zero_grad(return_bbox_targets)
def _bernoulli_sample_masks(masks, num_samples, sample_value):
    """Sub-sample a mask via independent Bernoulli trials.

    Entries of ``masks`` equal to ``sample_value`` are each kept with
    probability ``min(count, num_samples) / count``, so in expectation at
    most ``num_samples`` entries survive.
    """
    candidates = F.equal(masks, sample_value)
    num_candidates = candidates.sum()
    # Cap the expected number of survivors at num_samples.
    keep_prob = F.minimum(num_candidates, num_samples) / num_candidates
    # One independent uniform draw per element decides whether it is kept.
    draws = rand.uniform(0, 1, candidates.shape)
    return (draws <= keep_prob) * candidates
|
[
"megengine.functional.gather",
"megengine.functional.minimum",
"megengine.functional.argsort",
"megengine.tensor",
"megengine.functional.cond_take",
"megengine.random.uniform",
"megengine.functional.equal",
"megengine.functional.expand_dims",
"megengine.functional.concat",
"megengine.functional.ones"
] |
[((6507, 6535), 'megengine.functional.equal', 'F.equal', (['masks', 'sample_value'], {}), '(masks, sample_value)\n', (6514, 6535), True, 'import megengine.functional as F\n'), ((6593, 6625), 'megengine.functional.minimum', 'F.minimum', (['num_mask', 'num_samples'], {}), '(num_mask, num_samples)\n', (6602, 6625), True, 'import megengine.functional as F\n'), ((6817, 6854), 'megengine.random.uniform', 'rand.uniform', (['(0)', '(1)', 'sample_mask.shape'], {}), '(0, 1, sample_mask.shape)\n', (6829, 6854), True, 'import megengine.random as rand\n'), ((706, 743), 'megengine.functional.ones', 'F.ones', (['[1, gt_boxes_perimg.shape[1]]'], {}), '([1, gt_boxes_perimg.shape[1]])\n', (712, 743), True, 'import megengine.functional as F\n'), ((867, 921), 'megengine.functional.concat', 'F.concat', (['[batch_inds, gt_boxes_perimg[:, :4]]'], {'axis': '(1)'}), '([batch_inds, gt_boxes_perimg[:, :4]], axis=1)\n', (875, 921), True, 'import megengine.functional as F\n'), ((1011, 1056), 'megengine.functional.cond_take', 'F.cond_take', (['batch_rois_mask', 'batch_rois_mask'], {}), '(batch_rois_mask, batch_rois_mask)\n', (1022, 1056), True, 'import megengine.functional as F\n'), ((1408, 1453), 'megengine.functional.concat', 'F.concat', (['[gt_boxes_perimg, dummy_gt]'], {'axis': '(0)'}), '([gt_boxes_perimg, dummy_gt], axis=0)\n', (1416, 1453), True, 'import megengine.functional as F\n'), ((1496, 1553), 'det_opr.bbox_opr.box_overlap_ignore_opr', 'box_overlap_ignore_opr', (['all_rois[:, 1:5]', 'gt_boxes_perimg'], {}), '(all_rois[:, 1:5], gt_boxes_perimg)\n', (1518, 1553), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((1800, 1843), 'megengine.functional.argsort', 'F.argsort', (['overlaps_normal'], {'descending': '(True)'}), '(overlaps_normal, descending=True)\n', (1809, 1843), True, 'import megengine.functional as F\n'), ((1870, 1923), 'megengine.functional.gather', 'F.gather', (['overlaps_normal', '(1)', 'overlaps_normal_indices'], {}), 
'(overlaps_normal, 1, overlaps_normal_indices)\n', (1878, 1923), True, 'import megengine.functional as F\n'), ((2053, 2096), 'megengine.functional.argsort', 'F.argsort', (['overlaps_ignore'], {'descending': '(True)'}), '(overlaps_ignore, descending=True)\n', (2062, 2096), True, 'import megengine.functional as F\n'), ((2125, 2178), 'megengine.functional.gather', 'F.gather', (['overlaps_ignore', '(1)', 'overlaps_ignore_indices'], {}), '(overlaps_ignore, 1, overlaps_ignore_indices)\n', (2133, 2178), True, 'import megengine.functional as F\n'), ((4149, 4186), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask > 0)', 'keep_mask'], {}), '(keep_mask > 0, keep_mask)\n', (4160, 4186), True, 'import megengine.functional as F\n'), ((4902, 4962), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['target_rois[:, 1:5]', 'target_boxes[:, :4]'], {}), '(target_rois[:, 1:5], target_boxes[:, :4])\n', (4920, 4962), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr, box_overlap_ignore_opr\n'), ((5749, 5778), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (5757, 5778), True, 'import megengine.functional as F\n'), ((5803, 5834), 'megengine.functional.concat', 'F.concat', (['return_labels'], {'axis': '(0)'}), '(return_labels, axis=0)\n', (5811, 5834), True, 'import megengine.functional as F\n'), ((5865, 5902), 'megengine.functional.concat', 'F.concat', (['return_bbox_targets'], {'axis': '(0)'}), '(return_bbox_targets, axis=0)\n', (5873, 5902), True, 'import megengine.functional as F\n'), ((766, 803), 'megengine.functional.ones', 'F.ones', (['(gt_boxes_perimg.shape[0], 1)'], {}), '((gt_boxes_perimg.shape[0], 1))\n', (772, 803), True, 'import megengine.functional as F\n'), ((948, 976), 'megengine.functional.equal', 'F.equal', (['rpn_rois[:, 0]', 'bid'], {}), '(rpn_rois[:, 0], bid)\n', (955, 976), True, 'import megengine.functional as F\n'), ((1189, 1244), 'megengine.functional.concat', 
'F.concat', (['[rpn_rois[batch_rois_index], gt_rois]'], {'axis': '(0)'}), '([rpn_rois[batch_rois_index], gt_rois], axis=0)\n', (1197, 1244), True, 'import megengine.functional as F\n'), ((3727, 3752), 'megengine.functional.equal', 'F.equal', (['fg_mask[:, 0]', '(0)'], {}), '(fg_mask[:, 0], 0)\n', (3734, 3752), True, 'import megengine.functional as F\n'), ((3899, 3924), 'megengine.functional.equal', 'F.equal', (['bg_mask[:, 0]', '(0)'], {}), '(bg_mask[:, 0], 0)\n', (3906, 3924), True, 'import megengine.functional as F\n'), ((3296, 3332), 'megengine.functional.equal', 'F.equal', (['labels', 'config.ignore_label'], {}), '(labels, config.ignore_label)\n', (3303, 3332), True, 'import megengine.functional as F\n'), ((4725, 4747), 'megengine.functional.expand_dims', 'F.expand_dims', (['rois', '(1)'], {}), '(rois, 1)\n', (4738, 4747), True, 'import megengine.functional as F\n'), ((5032, 5079), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (5042, 5079), True, 'import megengine as mge\n'), ((5119, 5167), 'megengine.tensor', 'mge.tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (5129, 5167), True, 'import megengine as mge\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
from typing import Optional, Sequence
import cv2
import megengine.data as data
import megengine.data.transform as T
import numpy as np
from basecore.config import ConfigDict
from loguru import logger
from basecls.utils import registers
from .augment import WARP_PARAMS, TorchAutoAugment, TorchRandAugment
from .const import CV2_INTERP, PIL_INTERP
from .mixup import MixupCutmixCollator
from .rand_erase import RandomErasing
__all__ = [
"build_transform",
"AutoAugment",
"SimpleAugment",
"ColorAugment",
"RandAugment",
"build_mixup",
]
def build_transform(
    cfg: ConfigDict, train: bool = True, augments: T.Transform = None
) -> T.Transform:
    """Build function for MegEngine transform.
    Args:
        cfg: config for building transform.
        train: train set or test set. Default: ``True``
        augments: augments for building transform; required when ``train``
            is ``True``, must be ``None`` otherwise.
    Returns:
        A transform.
    """
    if train:
        assert augments is not None
        # Config mean/std are in BGR order; flip them when the pipeline
        # converts to RGB so pad/fill colors stay consistent downstream.
        bgr_mean = copy.deepcopy(cfg.preprocess.img_mean)
        bgr_std = copy.deepcopy(cfg.preprocess.img_std)
        if cfg.preprocess.img_color_space == "RGB":
            bgr_mean = bgr_mean[::-1]
            bgr_std = bgr_std[::-1]
        # NOTE: mutates the module-level WARP_PARAMS consumed by the
        # PIL-based augment ops (which expect RGB, hence the reversal).
        WARP_PARAMS["fillcolor"] = tuple(round(v) for v in bgr_mean[::-1])  # need RGB
        WARP_PARAMS["resample"] = PIL_INTERP[cfg.augments.resize.interpolation]
        # Pipeline order matters: geometric crop/flip first, then the
        # supplied augments, erasing, color-space conversion, ToMode last.
        transforms = [
            T.RandomResizedCrop(
                cfg.preprocess.img_size,
                cfg.augments.resize.scale_range,
                cfg.augments.resize.ratio_range,
                CV2_INTERP[cfg.augments.resize.interpolation],
            ),
            T.RandomHorizontalFlip(),
            augments,
            RandomErasing(
                **cfg.augments.rand_erase.to_dict(),
                pad_mean=bgr_mean,  # need BGR
                pad_std=bgr_std,  # need BGR
            ),
            ToColorSpace(cfg.preprocess.img_color_space),
            T.ToMode(),
        ]
    else:
        assert augments is None
        transforms = [
            T.Resize(
                # resize so that crop_pct of the image equals img_size,
                # rounded to the nearest even number
                int(cfg.test.img_size / cfg.test.crop_pct / 2 + 0.5) * 2,  # make it even
                CV2_INTERP[cfg.augments.resize.interpolation],
            ),
            T.CenterCrop(cfg.test.img_size),
            ToColorSpace(cfg.preprocess.img_color_space),
            T.ToMode(),
        ]
    return T.Compose(transforms=transforms, order=["image", "image_category"])
class ToColorSpace(T.VisionTransform):
    """Transform converting the image color space.

    Args:
        color_space: target color space, one of ``"BGR"``, ``"RGB"``,
            ``"GRAY"``.
    """

    def __init__(self, color_space: str, *, order: Sequence = None):
        super().__init__(order)
        if color_space not in ("BGR", "RGB", "GRAY"):
            raise ValueError(f"Color space '{color_space}' not supported")
        self.color_space = color_space

    def _apply_image(self, image: np.ndarray) -> np.ndarray:
        # Decoded images arrive as BGR, so every conversion starts there.
        if self.color_space == "BGR":
            return image
        if self.color_space == "RGB":
            return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.color_space == "GRAY":
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # Keep a trailing channel axis so downstream code sees HWC.
            return gray[..., np.newaxis]
        raise ValueError(f"Color space '{self.color_space}' not supported")
@registers.augments.register()
class SimpleAugment:
    """Simple augmentation."""

    @classmethod
    def build(cls, cfg: ConfigDict) -> T.Transform:
        # No extra augmentation: the identity (pseudo) transform.
        return T.PseudoTransform()
@registers.augments.register()
class ColorAugment:
    """Color augmentation."""

    @classmethod
    def build(cls, cfg: ConfigDict) -> T.Transform:
        params = cfg.augments.color_aug.to_dict()
        # Lighting strength is a separate transform, not a ColorJitter kwarg.
        lighting = params.pop("lighting")
        return T.Compose([T.ColorJitter(**params), T.Lighting(lighting)])
@registers.augments.register()
class AutoAugment:
    """AutoAugment."""

    @classmethod
    def build(cls, cfg: ConfigDict) -> T.Transform:
        policy = TorchAutoAugment()
        return T.TorchTransformCompose([policy])
@registers.augments.register()
class RandAugment:
    """Random augmentation."""

    @classmethod
    def build(cls, cfg: ConfigDict) -> T.Transform:
        kwargs = cfg.augments.rand_aug.to_dict()
        return T.TorchTransformCompose([TorchRandAugment(**kwargs)])
def build_mixup(cfg: ConfigDict, train: bool = True) -> Optional[data.Collator]:
    """Build (optionally) Mixup/CutMix augment.

    Args:
        cfg: config for building Mixup/CutMix collator.
        train: train set or test set. Default: ``True``

    Returns:
        :py:class:`~basecls.data.mixup.MixupCutmixCollator` or ``None``
    """
    mixup_cfg = cfg.augments.mixup
    # Mixup/CutMix is active only when at least one branch is configured.
    enabled = (
        mixup_cfg.mixup_alpha > 0.0
        or mixup_cfg.cutmix_alpha > 0.0
        or mixup_cfg.cutmix_minmax is not None
    )
    if not (train and enabled):
        return None
    collator = MixupCutmixCollator(**mixup_cfg.to_dict(), num_classes=cfg.num_classes)
    logger.info(f"Using mixup with configuration:\n{mixup_cfg}")
    return collator
|
[
"megengine.data.transform.Lighting",
"megengine.data.transform.PseudoTransform",
"megengine.data.transform.RandomResizedCrop",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.data.transform.ColorJitter",
"megengine.data.transform.Compose",
"megengine.data.transform.ToMode",
"megengine.data.transform.CenterCrop"
] |
[((3450, 3479), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3477, 3479), False, 'from basecls.utils import registers\n'), ((3640, 3669), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (3667, 3669), False, 'from basecls.utils import registers\n'), ((3977, 4006), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4004, 4006), False, 'from basecls.utils import registers\n'), ((4183, 4212), 'basecls.utils.registers.augments.register', 'registers.augments.register', ([], {}), '()\n', (4210, 4212), False, 'from basecls.utils import registers\n'), ((2493, 2560), 'megengine.data.transform.Compose', 'T.Compose', ([], {'transforms': 'transforms', 'order': "['image', 'image_category']"}), "(transforms=transforms, order=['image', 'image_category'])\n", (2502, 2560), True, 'import megengine.data.transform as T\n'), ((1089, 1127), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_mean'], {}), '(cfg.preprocess.img_mean)\n', (1102, 1127), False, 'import copy\n'), ((1146, 1183), 'copy.deepcopy', 'copy.deepcopy', (['cfg.preprocess.img_std'], {}), '(cfg.preprocess.img_std)\n', (1159, 1183), False, 'import copy\n'), ((3617, 3636), 'megengine.data.transform.PseudoTransform', 'T.PseudoTransform', ([], {}), '()\n', (3634, 3636), True, 'import megengine.data.transform as T\n'), ((5064, 5127), 'loguru.logger.info', 'logger.info', (['f"""Using mixup with configuration:\n{mixup_cfg}"""'], {}), '(f"""Using mixup with configuration:\n{mixup_cfg}""")\n', (5075, 5127), False, 'from loguru import logger\n'), ((1513, 1680), 'megengine.data.transform.RandomResizedCrop', 'T.RandomResizedCrop', (['cfg.preprocess.img_size', 'cfg.augments.resize.scale_range', 'cfg.augments.resize.ratio_range', 'CV2_INTERP[cfg.augments.resize.interpolation]'], {}), '(cfg.preprocess.img_size, cfg.augments.resize.\n scale_range, cfg.augments.resize.ratio_range, 
CV2_INTERP[cfg.augments.\n resize.interpolation])\n', (1532, 1680), True, 'import megengine.data.transform as T\n'), ((1763, 1787), 'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (1785, 1787), True, 'import megengine.data.transform as T\n'), ((2068, 2078), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2076, 2078), True, 'import megengine.data.transform as T\n'), ((2357, 2388), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['cfg.test.img_size'], {}), '(cfg.test.img_size)\n', (2369, 2388), True, 'import megengine.data.transform as T\n'), ((2460, 2470), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (2468, 2470), True, 'import megengine.data.transform as T\n'), ((3197, 3235), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3209, 3235), False, 'import cv2\n'), ((3918, 3943), 'megengine.data.transform.ColorJitter', 'T.ColorJitter', ([], {}), '(**aug_args)\n', (3931, 3943), True, 'import megengine.data.transform as T\n'), ((3945, 3971), 'megengine.data.transform.Lighting', 'T.Lighting', (['lighting_scale'], {}), '(lighting_scale)\n', (3955, 3971), True, 'import megengine.data.transform as T\n'), ((3296, 3335), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3308, 3335), False, 'import cv2\n')]
|
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
"""
Implementation of Base GAN models.
"""
import megengine
import megengine.functional as F
import megengine.module as M
import megengine.random as R
import numpy as np
from . import losses
from .basemodel import BaseModel
class BaseGenerator(BaseModel):
    r"""
    Base class for a generic unconditional generator model.
    Attributes:
        nz (int): Noise dimension for upsampling.
        ngf (int): Variable controlling generator feature map sizes.
        bottom_width (int): Starting width for upsampling generator output to an image.
        loss_type (str): Name of loss to use for GAN loss.
    """
    def __init__(self, nz, ngf, bottom_width, loss_type, **kwargs):
        super().__init__(**kwargs)
        self.nz = nz
        self.ngf = ngf
        self.bottom_width = bottom_width
        self.loss_type = loss_type
    def _train_step_implementation(
            self,
            real_batch,
            netD=None,
            optG=None):
        """Run one generator update: sample fakes, score them with ``netD``,
        then backprop and step ``optG``.

        Args:
            real_batch: batch of real images; only its batch size is used.
            netD: discriminator module producing logits for the fakes.
            optG: generator optimizer (old MegEngine API where ``backward``
                lives on the optimizer object).

        Returns:
            The generator loss ``errG``.
        """
        # Produce fake images
        fake_images = self._infer_step_implementation(real_batch)
        # Compute output logit of D thinking image real
        output = netD(fake_images)
        # Compute loss
        errG = self.compute_gan_loss(output=output)
        optG.zero_grad()
        optG.backward(errG)
        optG.step()
        return errG
    def _infer_step_implementation(self, batch):
        """Sample gaussian noise matching ``batch``'s batch size and decode
        it into fake images via ``self.forward``."""
        # Get only batch size from real batch
        batch_size = batch.shape[0]
        noise = R.gaussian(shape=[batch_size, self.nz])
        fake_images = self.forward(noise)
        return fake_images
    def compute_gan_loss(self, output):
        """Map discriminator logits on fakes to the generator loss selected
        by ``self.loss_type`` ("ns" or "wasserstein").

        Raises:
            ValueError: if ``loss_type`` is neither of the supported names.
        """
        if self.loss_type == "ns":
            errG = losses.ns_loss_gen(output)
        elif self.loss_type == "wasserstein":
            errG = losses.wasserstein_loss_gen(output)
        else:
            raise ValueError("Invalid loss_type {} selected.".format(
                self.loss_type))
        return errG
    def generate_images(self, num_images):
        """Generate images of shape [`num_images`, C, H, W].
        Depending on the final activation function, pixel values are NOT guaranteed
        to be within [0, 1].
        """
        # The dummy array only carries the batch size; its values are unused.
        return self.infer_step(np.empty(num_images, dtype="float32"))
class BaseDiscriminator(BaseModel):
    r"""
    Base class for a generic unconditional discriminator model.
    Attributes:
        ndf (int): Variable controlling discriminator feature map sizes.
        loss_type (str): Name of loss to use for GAN loss.
    """
    def __init__(self, ndf, loss_type, **kwargs):
        super().__init__(**kwargs)
        self.ndf = ndf
        self.loss_type = loss_type
    def _train_step_implementation(
            self,
            real_batch,
            netG=None,
            optD=None):
        """Run one discriminator update on ``real_batch`` plus freshly
        generated fakes, then backprop and step ``optD``.

        Returns:
            Tuple ``(errD, D_x, D_Gz)``: the discriminator loss and the mean
            real/fake probabilities.
        """
        # Produce logits for real images
        output_real = self._infer_step_implementation(real_batch)
        # Produce fake images
        fake_images = netG._infer_step_implementation(real_batch)
        # Block gradients so the generator is not updated by D's step.
        fake_images = F.zero_grad(fake_images)
        # Produce logits for fake images
        output_fake = self._infer_step_implementation(fake_images)
        # Compute loss for D
        errD = self.compute_gan_loss(output_real=output_real,
                                     output_fake=output_fake)
        D_x, D_Gz = self.compute_probs(output_real=output_real,
                                       output_fake=output_fake)
        # Backprop and update gradients
        optD.zero_grad()
        optD.backward(errD)
        optD.step()
        return errD, D_x, D_Gz
    def _infer_step_implementation(self, batch):
        # Inference is a plain forward pass over the batch.
        return self.forward(batch)
    def compute_gan_loss(self, output_real, output_fake):
        r"""
        Computes GAN loss for discriminator.
        Args:
            output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
            output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
        Returns:
            errD (Tensor): A batch of GAN losses for the discriminator.
        Raises:
            ValueError: if ``self.loss_type`` is not "gan", "ns" or "wasserstein".
        """
        # Compute loss for D
        if self.loss_type == "gan" or self.loss_type == "ns":
            errD = losses.minimax_loss_dis(output_fake=output_fake,
                                           output_real=output_real)
        elif self.loss_type == "wasserstein":
            errD = losses.wasserstein_loss_dis(output_fake=output_fake,
                                               output_real=output_real)
        else:
            raise ValueError("Invalid loss_type selected.")
        return errD
    def compute_probs(self, output_real, output_fake):
        r"""
        Computes probabilities from real/fake images logits.
        Args:
            output_real (Tensor): A batch of output logits of shape (N, 1) from real images.
            output_fake (Tensor): A batch of output logits of shape (N, 1) from fake images.
        Returns:
            tuple: Average probabilities of real/fake image considered as real for the batch.
        """
        # Sigmoid turns logits into probabilities; mean aggregates the batch.
        D_x = F.sigmoid(output_real).mean()
        D_Gz = F.sigmoid(output_fake).mean()
        return D_x, D_Gz
|
[
"megengine.random.gaussian",
"megengine.functional.zero_grad",
"megengine.functional.sigmoid"
] |
[((2259, 2298), 'megengine.random.gaussian', 'R.gaussian', ([], {'shape': '[batch_size, self.nz]'}), '(shape=[batch_size, self.nz])\n', (2269, 2298), True, 'import megengine.random as R\n'), ((3780, 3804), 'megengine.functional.zero_grad', 'F.zero_grad', (['fake_images'], {}), '(fake_images)\n', (3791, 3804), True, 'import megengine.functional as F\n'), ((2994, 3031), 'numpy.empty', 'np.empty', (['num_images'], {'dtype': '"""float32"""'}), "(num_images, dtype='float32')\n", (3002, 3031), True, 'import numpy as np\n'), ((5829, 5851), 'megengine.functional.sigmoid', 'F.sigmoid', (['output_real'], {}), '(output_real)\n', (5838, 5851), True, 'import megengine.functional as F\n'), ((5874, 5896), 'megengine.functional.sigmoid', 'F.sigmoid', (['output_fake'], {}), '(output_fake)\n', (5883, 5896), True, 'import megengine.functional as F\n')]
|
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
import math
from . import default_init_weights
class ShuffleV2Block(M.Module):
    """ShuffleNetV2-style building block.

    With stride 1 the input is split channel-wise (via channel shuffle) into
    a pass-through half and a processed half; with stride 2 both a projection
    branch and the main branch downsample the full input. Outputs of the two
    branches are concatenated along channels.
    """
    def __init__(self, inp, oup, mid_channels, *, ksize, stride):
        super().__init__()
        self.stride = stride
        assert stride in [1, 2]
        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp
        # Main branch produces the remaining channels after concat with inp.
        outputs = oup - inp
        branch_main = [
            # pw
            M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            M.ReLU(),
            # dw
            M.Conv2d(
                mid_channels, mid_channels, ksize, stride, pad,
                groups=mid_channels, bias=False,
            ),
            # pw-linear
            M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            M.ReLU(),
        ]
        self.branch_main = M.Sequential(*branch_main)
        if stride == 2:
            branch_proj = [
                # dw
                M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
                M.BatchNorm2d(inp),
                # pw-linear
                M.Conv2d(inp, inp, 1, 1, 0, bias=False),
                M.BatchNorm2d(inp),
                M.ReLU(),
            ]
            self.branch_proj = M.Sequential(*branch_proj)
        else:
            self.branch_proj = None
        self.init_weights()
    def forward(self, old_x):
        if self.stride == 1:
            # Shuffle then split: one half bypasses, the other is processed.
            x_proj, x = self.channel_shuffle(old_x)
            return F.concat((x_proj, self.branch_main(x)), 1)
        elif self.stride == 2:
            # Downsampling: both branches see the full input.
            x_proj = old_x
            x = old_x
            return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
        else:
            raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
    def channel_shuffle(self, x):
        """Interleave channels and split the tensor into two halves.

        Returns a tuple of two tensors of shape (N, C // 2, H, W) whose
        channels are drawn alternately from the input.
        """
        batchsize, num_channels, height, width = x.shape
        # assert (num_channels % 4 == 0)
        # Fold pairs of channels together, swap the pair axis to the front,
        # then split into the two interleaved halves.
        x = x.reshape(batchsize * num_channels // 2, 2, height * width)
        x = F.transpose(x, (1, 0, 2))
        x = x.reshape(2, -1, num_channels // 2, height, width)
        return x[0], x[1]
    def init_weights(self):
        # Scale down initial weights (see default_init_weights) for stability.
        default_init_weights(self, scale=0.2)
|
[
"megengine.module.ReLU",
"megengine.module.BatchNorm2d",
"megengine.functional.transpose",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((943, 969), 'megengine.module.Sequential', 'M.Sequential', (['*branch_main'], {}), '(*branch_main)\n', (955, 969), True, 'import megengine.module as M\n'), ((2111, 2136), 'megengine.functional.transpose', 'F.transpose', (['x', '(1, 0, 2)'], {}), '(x, (1, 0, 2))\n', (2122, 2136), True, 'import megengine.functional as F\n'), ((555, 603), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'mid_channels', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, mid_channels, 1, 1, 0, bias=False)\n', (563, 603), True, 'import megengine.module as M\n'), ((617, 625), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (623, 625), True, 'import megengine.module as M\n'), ((656, 750), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'mid_channels', 'ksize', 'stride', 'pad'], {'groups': 'mid_channels', 'bias': '(False)'}), '(mid_channels, mid_channels, ksize, stride, pad, groups=\n mid_channels, bias=False)\n', (664, 750), True, 'import megengine.module as M\n'), ((830, 882), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channels', 'outputs', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(mid_channels, outputs, 1, 1, 0, bias=False)\n', (838, 882), True, 'import megengine.module as M\n'), ((896, 904), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (902, 904), True, 'import megengine.module as M\n'), ((1352, 1378), 'megengine.module.Sequential', 'M.Sequential', (['*branch_proj'], {}), '(*branch_proj)\n', (1364, 1378), True, 'import megengine.module as M\n'), ((1060, 1122), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', 'ksize', 'stride', 'pad'], {'groups': 'inp', 'bias': '(False)'}), '(inp, inp, ksize, stride, pad, groups=inp, bias=False)\n', (1068, 1122), True, 'import megengine.module as M\n'), ((1140, 1158), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1153, 1158), True, 'import megengine.module as M\n'), ((1204, 1243), 'megengine.module.Conv2d', 'M.Conv2d', (['inp', 'inp', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(inp, inp, 
1, 1, 0, bias=False)\n', (1212, 1243), True, 'import megengine.module as M\n'), ((1261, 1279), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['inp'], {}), '(inp)\n', (1274, 1279), True, 'import megengine.module as M\n'), ((1297, 1305), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (1303, 1305), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved.
# ----------------------------------------------------------------------
"""Megengine BERT model."""
import copy
import json
import math
import os
import urllib
import urllib.request
from io import open
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
from megengine import Parameter
from megengine.functional.loss import cross_entropy
from megengine.module import Dropout, Embedding, Linear, Module, Sequential
from megengine.module.activation import Softmax
def transpose(inp, a, b):
    """Return ``inp`` with axes ``a`` and ``b`` swapped, all others kept."""
    order = list(range(inp.ndim))
    order[a], order[b] = order[b], order[a]
    return inp.transpose(order)
def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    x * 0.5 * (1.0 + F.tanh((math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3)))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # sqrt(2/pi) is a compile-time constant; compute it with math.sqrt on the
    # host instead of launching a tensor op (F.sqrt) on every forward call.
    return x * 0.5 * (1.0 + F.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * (x ** 3))))
ACT2FN = {"gelu": gelu, "relu": F.relu}
class BertConfig:
    """Configuration class to store the configuration of a `BertModel`.
    """

    def __init__(
        self,
        vocab_size_or_config_json_file,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
    ):
        """Constructs BertConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `BertModel`, or a path to a JSON config file to load.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer encoder.
            num_attention_heads: Number of attention heads per attention layer.
            intermediate_size: Size of the "intermediate" (feed-forward) layer.
            hidden_act: Non-linear activation in encoder and pooler; "gelu",
                "relu" and "swish" are supported.
            hidden_dropout_prob: Dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: Dropout ratio for attention probabilities.
            max_position_embeddings: Maximum sequence length the model might
                ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: Vocabulary size of `token_type_ids`.
            initializer_range: Stddev of the truncated-normal initializer for
                all weight matrices.
        Raises:
            ValueError: if the first argument is neither a str path nor an int.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Argument is a path: every key in the JSON file becomes an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
                self.__dict__.update(json.loads(reader.read()))
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError(
                "First argument must be either a vocabulary size (int)"
                "or the path to a pretrained model config file (str)"
            )

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BertConfig` from a Python dictionary of parameters."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        config.__dict__.update(json_object)
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BertConfig` from a json file of parameters."""
        with open(json_file, "r", encoding="utf-8") as reader:
            return cls.from_dict(json.loads(reader.read()))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """ Save this instance to a json file."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
class BertLayerNorm(Module):
    """Construct a layernorm module in the TF style (epsilon inside the square root).
    """

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        # Learnable affine parameters: gain initialised to ones, bias to zeros.
        self.weight = Parameter(np.ones(hidden_size).astype(np.float32))
        self.bias = Parameter(np.zeros(hidden_size).astype(np.float32))
        self.variance_epsilon = eps

    def forward(self, x):
        axis = len(x.shape) - 1
        mean = F.mean(x, axis, True)
        var = F.mean((x - mean) ** 2, axis, True)
        # Normalize over the last axis, then apply the affine transform.
        normed = (x - mean) / ((var + self.variance_epsilon) ** 0.5)
        return self.weight * normed + self.bias
class BertEmbeddings(Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name
        # and be able to load any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, token_type_ids=None):
        """Sum word, position and segment embeddings, then LayerNorm + dropout.

        Args:
            input_ids: int tensor of token ids, shape (batch, seq_len).
            token_type_ids: optional segment ids of the same shape;
                treated as all-zero (single segment) when omitted.
        """
        seq_length = input_ids.shape[1]
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # Position ids 0..seq_len-1, broadcast across the batch dimension.
        position_ids = F.linspace(0, seq_length - 1, seq_length).astype(np.int32)
        position_ids = F.broadcast_to(F.expand_dims(position_ids, 0), input_ids.shape)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(Module):
    """Multi-head scaled dot-product self-attention as used in BERT."""
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        # Equals hidden_size given the divisibility check above.
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)
        self.dropout = Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        """Split the last axis into (heads, head_size) and move heads forward:
        (..., all_head_size) -> (batch, heads, seq, head_size)."""
        # using symbolic shapes to make trace happy
        x_shape = mge.tensor(x.shape)
        new_x_shape = F.concat(
            [x_shape[:-1], (self.num_attention_heads, self.attention_head_size)]
        )
        x = x.reshape(new_x_shape)
        return x.transpose(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask):
        """Compute self-attention over ``hidden_states``.

        Args:
            hidden_states: input features projected into Q/K/V.
            attention_mask: additive mask applied to the raw attention
                scores before softmax.

        Returns:
            Context tensor with the heads merged back into the last axis.
        """
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = F.matmul(query_layer, transpose(key_layer, -1, -2))
        # Scale by sqrt(d_k) as in "Attention Is All You Need".
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = Softmax(len(attention_scores.shape) - 1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = F.matmul(attention_probs, value_layer)
        context_layer = context_layer.transpose(0, 2, 1, 3)
        # using symbolic shapes to make trace happy
        context_shape = mge.tensor(context_layer.shape)
        new_context_layer_shape = F.concat([context_shape[:-2], self.all_head_size])
        context_layer = context_layer.reshape(new_context_layer_shape)
        return context_layer
class BertSelfOutput(Module):
    """Projects attention output and applies the residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        hidden = config.hidden_size
        self.dense = Linear(hidden, hidden)
        self.LayerNorm = BertLayerNorm(hidden, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """dense -> dropout -> add residual -> LayerNorm."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(Module):
    """Self-attention followed by its output projection / residual merge."""

    def __init__(self, config):
        super().__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        """Attend over ``input_tensor`` and combine the result with the residual."""
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(Module):
    """Feed-forward expansion layer with a configurable activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.intermediate_size)
        # The activation may be given by name (resolved through ACT2FN)
        # or directly as a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        """Expand to ``intermediate_size`` and apply the activation."""
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(Module):
    """Projects the feed-forward output back to ``hidden_size`` with a residual."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        """dense -> dropout -> add residual -> LayerNorm."""
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(Module):
    """One transformer encoder layer: attention + feed-forward, each with residuals."""

    def __init__(self, config):
        super().__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        """Run the attention sub-block, then the feed-forward sub-block."""
        attn_out = self.attention(hidden_states, attention_mask)
        ffn_out = self.intermediate(attn_out)
        return self.output(ffn_out, attn_out)
class BertEncoder(Module):
    """Stack of ``num_hidden_layers`` identical BertLayer modules."""

    def __init__(self, config):
        super().__init__()
        self.layer = Sequential(
            *[BertLayer(config) for _ in range(config.num_hidden_layers)]
        )

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        """Run every layer in sequence.

        Returns a list: every layer's output when
        ``output_all_encoded_layers`` is true, otherwise a single-element
        list holding only the final layer's output.
        """
        collected = []
        for block in self.layer:
            hidden_states = block(hidden_states, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden_states)
        if not output_all_encoded_layers:
            collected.append(hidden_states)
        return collected
class BertPooler(Module):
    """Pools a sequence into one vector via the first ([CLS]) token."""

    def __init__(self, config):
        super().__init__()
        self.dense = Linear(config.hidden_size, config.hidden_size)
        self.activation = F.tanh

    def forward(self, hidden_states):
        # The hidden state of the first token stands in for the whole sequence.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertModel(Module):
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
    Params:
        config: a BertConfig class instance with the configuration to build a new model
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary
            (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape
            [batch_size, sequence_length] with the token types indices selected in [0, 1].
            Type 0 corresponds to a `sentence A` and type 1 corresponds to
            a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1]. It's a mask to be used if the input sequence length
            is smaller than the max input sequence length in the current batch.
            It's the mask that we typically use for attention when
            a batch has varying length sentences.
        `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers`
            output as described below. Default: `True`.
    Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controled by `output_all_encoded_layers` argument:
            - `output_all_encoded_layers=True`: outputs a list of the full sequences of
                encoded-hidden-states at the end of each attention block
                (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
                encoded-hidden-state is a torch.FloatTensor of size
                [batch_size, sequence_length, hidden_size],
            - `output_all_encoded_layers=False`: outputs only the full sequence of
                hidden-states corresponding to the last attention block of shape
                [batch_size, sequence_length, hidden_size],
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size]
            which is the output of classifier pretrained on top of the hidden state
            associated to the first character of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = modeling.BertModel(config=config)
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config):
        super().__init__()
        # Embeddings -> encoder stack -> [CLS] pooler.
        self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        output_all_encoded_layers=True,
    ):
        # Default to "attend everywhere" / "single segment" when masks are omitted.
        if attention_mask is None:
            attention_mask = F.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = F.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = F.expand_dims(attention_mask, (1, 2))
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.astype(
            next(self.parameters()).dtype
        ) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(
            embedding_output,
            extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
        )
        # The pooler always works on the final layer's hidden states.
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            # NOTE: in this case `encoded_layers` becomes a single tensor,
            # not a one-element list.
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class BertForSequenceClassification(Module):
    """BERT model for classification.
    This module is composed of the BERT model with a linear layer on top of
    the pooled output.
    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_labels`: the number of classes for the classifier. Default = 2.
    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary.
            Items in the batch should begin with the special "CLS" token.
            (see the tokens preprocessing logic in the scripts
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
            and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [0, 1]. It's a mask to be used if the input sequence length
            is smaller than the max input sequence length in the current batch. It's the mask
            that we typically use for attention when a batch has varying length sentences.
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
            with indices selected in [0, ..., num_labels].
    Outputs:
        if `labels` is not `None`:
            Outputs the CrossEntropy classification loss of the output with the labels.
        if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_labels].
    Example usage:
    ```python
    # Already been converted into WordPiece token ids
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    num_labels = 2
    model = BertForSequenceClassification(config, num_labels)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_labels, bert=None):
        super().__init__()
        # An existing backbone may be injected (e.g. a pretrained hub model);
        # otherwise a fresh BertModel is built from `config`.
        if bert is None:
            self.bert = BertModel(config)
        else:
            self.bert = bert
        self.num_labels = num_labels
        self.dropout = Dropout(config.hidden_dropout_prob)
        self.classifier = Linear(config.hidden_size, num_labels)
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Only the pooled [CLS] representation is used for classification.
        _, pooled_output = self.bert(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False
        )
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Returns (logits, loss) when labels are given, else (logits, None).
        if labels is not None:
            loss = cross_entropy(
                logits.reshape(-1, self.num_labels), labels.reshape(-1)
            )
            return logits, loss
        else:
            return logits, None
# Base URL hosting the BERT weight / config / vocab assets.
DATA_URL = "https://data.megengine.org.cn/models/weights/bert"
CONFIG_NAME = "bert_config.json"
VOCAB_NAME = "vocab.txt"
# Maps official checkpoint names to the hub entry-point function names below.
MODEL_NAME = {
    "wwm_cased_L-24_H-1024_A-16": "wwm_cased_L_24_H_1024_A_16",
    "wwm_uncased_L-24_H-1024_A-16": "wwm_uncased_L_24_H_1024_A_16",
    "cased_L-12_H-768_A-12": "cased_L_12_H_768_A_12",
    "cased_L-24_H-1024_A-16": "cased_L_24_H_1024_A_16",
    "uncased_L-12_H-768_A-12": "uncased_L_12_H_768_A_12",
    "uncased_L-24_H-1024_A-16": "uncased_L_24_H_1024_A_16",
    "chinese_L-12_H-768_A-12": "chinese_L_12_H_768_A_12",
    "multi_cased_L-12_H-768_A-12": "multi_cased_L_12_H_768_A_12",
}
def download_file(url, filename):
    """Download ``url`` and save it to the local path ``filename``.

    Args:
        url: source URL (any scheme ``urllib`` supports, incl. ``file://``).
        filename: destination path; overwritten if it already exists.

    Raises:
        urllib.error.URLError: if the URL cannot be retrieved.
    """
    # Dead commented-out URLopener code removed; urlretrieve is the
    # supported Python 3 API.
    urllib.request.urlretrieve(url, filename)
def create_hub_bert(model_name, pretrained):
    """Fetch vocab/config for ``model_name`` and load the matching hub model.

    Args:
        model_name: one of the keys of ``MODEL_NAME``.
        pretrained: forwarded to ``hub.load``; load pretrained weights if True.

    Returns:
        Tuple ``(model, config, vocab_file)`` where ``config`` is a
        ``BertConfig`` parsed from the downloaded config file and
        ``vocab_file`` is the local path of the downloaded vocabulary.
    """
    assert model_name in MODEL_NAME, "{} not in the valid models {}".format(
        model_name, MODEL_NAME
    )
    data_dir = "./{}".format(model_name)
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(data_dir, exist_ok=True)
    vocab_url = "{}/{}/{}".format(DATA_URL, model_name, VOCAB_NAME)
    config_url = "{}/{}/{}".format(DATA_URL, model_name, CONFIG_NAME)
    vocab_file = "./{}/{}".format(model_name, VOCAB_NAME)
    config_file = "./{}/{}".format(model_name, CONFIG_NAME)
    download_file(vocab_url, vocab_file)
    download_file(config_url, config_file)
    config = BertConfig(config_file)
    model = hub.load("megengine/models", MODEL_NAME[model_name], pretrained=pretrained)
    return model, config, vocab_file
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"
)
def uncased_L_12_H_768_A_12():
    """Uncased BERT-Base: 12 layers, hidden 768, 12 heads, vocab 30522."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "max_position_embeddings": 512,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "type_vocab_size": 2,
        "vocab_size": 30522,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"
)
def cased_L_12_H_768_A_12():
    """Cased BERT-Base: 12 layers, hidden 768, 12 heads, vocab 28996."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "max_position_embeddings": 512,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "type_vocab_size": 2,
        "vocab_size": 28996,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"
)
def uncased_L_24_H_1024_A_16():
    """Uncased BERT-Large: 24 layers, hidden 1024, 16 heads, vocab 30522."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 1024,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "max_position_embeddings": 512,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "type_vocab_size": 2,
        "vocab_size": 30522,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"
)
def cased_L_24_H_1024_A_16():
    """Cased BERT-Large: 24 layers, hidden 1024, 16 heads, vocab 28996."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "directionality": "bidi",
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 1024,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "max_position_embeddings": 512,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "pooler_fc_size": 768,
        "pooler_num_attention_heads": 12,
        "pooler_num_fc_layers": 3,
        "pooler_size_per_head": 128,
        "pooler_type": "first_token_transform",
        "type_vocab_size": 2,
        "vocab_size": 28996,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"
)
def chinese_L_12_H_768_A_12():
    """Chinese BERT-Base: 12 layers, hidden 768, 12 heads, vocab 21128."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "directionality": "bidi",
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "max_position_embeddings": 512,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "pooler_fc_size": 768,
        "pooler_num_attention_heads": 12,
        "pooler_num_fc_layers": 3,
        "pooler_size_per_head": 128,
        "pooler_type": "first_token_transform",
        "type_vocab_size": 2,
        "vocab_size": 21128,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"
)
def multi_cased_L_12_H_768_A_12():
    """Multilingual cased BERT-Base: 12 layers, hidden 768, vocab 119547."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "directionality": "bidi",
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 768,
        "initializer_range": 0.02,
        "intermediate_size": 3072,
        "max_position_embeddings": 512,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "pooler_fc_size": 768,
        "pooler_num_attention_heads": 12,
        "pooler_num_fc_layers": 3,
        "pooler_size_per_head": 128,
        "pooler_type": "first_token_transform",
        "type_vocab_size": 2,
        "vocab_size": 119547,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"
)
def wwm_uncased_L_24_H_1024_A_16():
    """Whole-word-masking uncased BERT-Large: 24 layers, hidden 1024, vocab 30522."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 1024,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "max_position_embeddings": 512,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "type_vocab_size": 2,
        "vocab_size": 30522,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/bert/"
    "wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"
)
def wwm_cased_L_24_H_1024_A_16():
    """Whole-word-masking cased BERT-Large: 24 layers, hidden 1024, vocab 28996."""
    config_dict = {
        "attention_probs_dropout_prob": 0.1,
        "directionality": "bidi",
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "hidden_size": 1024,
        "initializer_range": 0.02,
        "intermediate_size": 4096,
        "max_position_embeddings": 512,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "pooler_fc_size": 768,
        "pooler_num_attention_heads": 12,
        "pooler_num_fc_layers": 3,
        "pooler_size_per_head": 128,
        "pooler_type": "first_token_transform",
        "type_vocab_size": 2,
        "vocab_size": 28996,
    }
    config = BertConfig.from_dict(config_dict)
    return BertModel(config)
|
[
"megengine.hub.pretrained",
"megengine.module.Embedding",
"megengine.hub.load",
"megengine.tensor",
"megengine.functional.concat",
"megengine.functional.matmul",
"megengine.functional.sqrt",
"megengine.functional.ones_like",
"megengine.functional.zeros_like",
"megengine.functional.linspace",
"megengine.module.Dropout",
"megengine.module.Linear",
"megengine.functional.expand_dims"
] |
[((24900, 25043), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-12_H-768_A-12/bert_4f2157f7_uncased_L-12_H-768_A-12.pkl'\n )\n", (24914, 25043), True, 'import megengine.hub as hub\n'), ((25559, 25698), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-12_H-768_A-12/bert_b9727c2f_cased_L-12_H-768_A-12.pkl'\n )\n", (25573, 25698), True, 'import megengine.hub as hub\n'), ((26212, 26357), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/uncased_L-24_H-1024_A-16/bert_222f5012_uncased_L-24_H-1024_A-16.pkl'\n )\n", (26226, 26357), True, 'import megengine.hub as hub\n'), ((26876, 27017), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/cased_L-24_H-1024_A-16/bert_01f2a65f_cased_L-24_H-1024_A-16.pkl'\n )\n", (26890, 27017), True, 'import megengine.hub as hub\n'), ((27761, 27904), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/chinese_L-12_H-768_A-12/bert_ee91be1a_chinese_L-12_H-768_A-12.pkl'\n )\n", (27775, 27904), True, 'import megengine.hub as hub\n'), ((28647, 28798), 'megengine.hub.pretrained', 'hub.pretrained', 
(['"""https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/multi_cased_L-12_H-768_A-12/bert_283ceec5_multi_cased_L-12_H-768_A-12.pkl'\n )\n", (28661, 28798), True, 'import megengine.hub as hub\n'), ((29547, 29700), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_uncased_L-24_H-1024_A-16/bert_e2780a6a_wwm_uncased_L-24_H-1024_A-16.pkl'\n )\n", (29561, 29700), True, 'import megengine.hub as hub\n'), ((30222, 30371), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/bert/wwm_cased_L-24_H-1024_A-16/bert_0a8f1389_wwm_cased_L-24_H-1024_A-16.pkl'\n )\n", (30236, 30371), True, 'import megengine.hub as hub\n'), ((24078, 24119), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'filename'], {}), '(url, filename)\n', (24104, 24119), False, 'import urllib\n'), ((24783, 24858), 'megengine.hub.load', 'hub.load', (['"""megengine/models"""', 'MODEL_NAME[model_name]'], {'pretrained': 'pretrained'}), "('megengine/models', MODEL_NAME[model_name], pretrained=pretrained)\n", (24791, 24858), True, 'import megengine.hub as hub\n'), ((5786, 5814), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (5799, 5814), False, 'import copy\n'), ((7035, 7083), 'megengine.module.Embedding', 'Embedding', (['config.vocab_size', 'config.hidden_size'], {}), '(config.vocab_size, config.hidden_size)\n', (7044, 7083), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7119, 7180), 
'megengine.module.Embedding', 'Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (7128, 7180), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7240, 7293), 'megengine.module.Embedding', 'Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (7249, 7293), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((7560, 7595), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7567, 7595), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((8981, 9027), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (8987, 9027), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9047, 9093), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9053, 9093), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9115, 9161), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (9121, 9161), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9186, 9230), 'megengine.module.Dropout', 'Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (9193, 9230), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((9341, 9360), 'megengine.tensor', 'mge.tensor', (['x.shape'], {}), '(x.shape)\n', (9351, 9360), True, 'import megengine as mge\n'), ((9383, 9461), 'megengine.functional.concat', 'F.concat', (['[x_shape[:-1], 
(self.num_attention_heads, self.attention_head_size)]'], {}), '([x_shape[:-1], (self.num_attention_heads, self.attention_head_size)])\n', (9391, 9461), True, 'import megengine.functional as F\n'), ((10768, 10806), 'megengine.functional.matmul', 'F.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (10776, 10806), True, 'import megengine.functional as F\n'), ((10943, 10974), 'megengine.tensor', 'mge.tensor', (['context_layer.shape'], {}), '(context_layer.shape)\n', (10953, 10974), True, 'import megengine as mge\n'), ((11009, 11059), 'megengine.functional.concat', 'F.concat', (['[context_shape[:-2], self.all_head_size]'], {}), '([context_shape[:-2], self.all_head_size])\n', (11017, 11059), True, 'import megengine.functional as F\n'), ((11272, 11318), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (11278, 11318), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((11412, 11447), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (11419, 11447), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12210, 12262), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.intermediate_size'], {}), '(config.hidden_size, config.intermediate_size)\n', (12216, 12262), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12736, 12788), 'megengine.module.Linear', 'Linear', (['config.intermediate_size', 'config.hidden_size'], {}), '(config.intermediate_size, config.hidden_size)\n', (12742, 12788), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((12882, 12917), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (12889, 12917), False, 'from megengine.module import Dropout, 
Embedding, Linear, Module, Sequential\n'), ((14562, 14608), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (14568, 14608), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((18962, 18999), 'megengine.functional.expand_dims', 'F.expand_dims', (['attention_mask', '(1, 2)'], {}), '(attention_mask, (1, 2))\n', (18975, 18999), True, 'import megengine.functional as F\n'), ((22709, 22744), 'megengine.module.Dropout', 'Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (22716, 22744), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((22771, 22809), 'megengine.module.Linear', 'Linear', (['config.hidden_size', 'num_labels'], {}), '(config.hidden_size, num_labels)\n', (22777, 22809), False, 'from megengine.module import Dropout, Embedding, Linear, Module, Sequential\n'), ((24333, 24357), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (24347, 24357), False, 'import os\n'), ((24367, 24388), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (24378, 24388), False, 'import os\n'), ((5485, 5523), 'io.open', 'open', (['json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(json_file, 'r', encoding='utf-8')\n", (5489, 5523), False, 'from io import open\n'), ((5597, 5613), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (5607, 5613), False, 'import json\n'), ((6108, 6151), 'io.open', 'open', (['json_file_path', '"""w"""'], {'encoding': '"""utf-8"""'}), "(json_file_path, 'w', encoding='utf-8')\n", (6112, 6151), False, 'from io import open\n'), ((7757, 7780), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (7769, 7780), True, 'import megengine.functional as F\n'), ((7902, 7932), 'megengine.functional.expand_dims', 'F.expand_dims', (['position_ids', '(0)'], {}), '(position_ids, 0)\n', (7915, 7932), 
True, 'import megengine.functional as F\n'), ((10185, 10220), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (10194, 10220), False, 'import math\n'), ((18332, 18354), 'megengine.functional.ones_like', 'F.ones_like', (['input_ids'], {}), '(input_ids)\n', (18343, 18354), True, 'import megengine.functional as F\n'), ((18419, 18442), 'megengine.functional.zeros_like', 'F.zeros_like', (['input_ids'], {}), '(input_ids)\n', (18431, 18442), True, 'import megengine.functional as F\n'), ((3928, 3987), 'io.open', 'open', (['vocab_size_or_config_json_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(vocab_size_or_config_json_file, 'r', encoding='utf-8')\n", (3932, 3987), False, 'from io import open\n'), ((7805, 7846), 'megengine.functional.linspace', 'F.linspace', (['(0)', '(seq_length - 1)', 'seq_length'], {}), '(0, seq_length - 1, seq_length)\n', (7815, 7846), True, 'import megengine.functional as F\n'), ((1797, 1816), 'megengine.functional.sqrt', 'F.sqrt', (['(2 / math.pi)'], {}), '(2 / math.pi)\n', (1803, 1816), True, 'import megengine.functional as F\n'), ((6444, 6464), 'numpy.ones', 'np.ones', (['hidden_size'], {}), '(hidden_size)\n', (6451, 6464), True, 'import numpy as np\n'), ((6515, 6536), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (6523, 6536), True, 'import numpy as np\n')]
|
#!/usr/bin/env mdl
# This file will seal the nms opr within a better way than lib_nms
import ctypes
import os
import struct
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine._internal.craniotome import CraniotomeBase
from megengine.core.tensor import wrap_io_tensor
# Locate (or build on first use) the CUDA NMS shared library next to this file.
_current_path = os.path.dirname(os.path.abspath(__file__))
_so_path = os.path.join(_current_path, "lib_nms.so")
try:
    _so_lib = ctypes.CDLL(_so_path)
except Exception:
    # lib_nms.so is missing or unloadable: compile gpu_nms/nms.cu with nvcc
    # against the MegEngine internal headers, then load the fresh build.
    import subprocess
    mge_path = os.path.join(os.path.dirname(mge.__file__), "_internal", "include")
    assert os.path.exists(mge_path), "{} file not found".format(mge_path)
    src_file = os.path.join(_current_path, "gpu_nms", "nms.cu")
    assert os.path.exists(src_file), "{} file not found".format(src_file)
    cmd = (
        "nvcc -I {} -shared -o {} -Xcompiler '-fno-strict-aliasing -fPIC' {}".format(
            mge_path, _so_path, src_file
        )
    )
    subprocess.check_call(cmd, shell=True)
    _so_lib = ctypes.CDLL(_so_path)
# ctypes aliases used to declare the C function signatures below.
# (The original assigned _TYPE_POINTER twice — duplicate removed.)
_TYPE_POINTER = ctypes.c_void_p
_TYPE_INT = ctypes.c_int32
_TYPE_FLOAT = ctypes.c_float
# int NMSForwardGpu(boxes, mask, output, output_num, iou_threshold,
#                   max_output, host_device)
_so_lib.NMSForwardGpu.argtypes = [
    _TYPE_POINTER,
    _TYPE_POINTER,
    _TYPE_POINTER,
    _TYPE_POINTER,
    _TYPE_FLOAT,
    _TYPE_INT,
    _TYPE_POINTER,
]
_so_lib.NMSForwardGpu.restype = _TYPE_INT
_so_lib.CreateHostDevice.restype = _TYPE_POINTER
class NMSCran(CraniotomeBase):
    """Craniotome operator wrapping the CUDA NMS kernel in lib_nms.so.

    Input 0:  box tensor (rows hold box coordinates plus a score column —
              see ``batched_nms`` below, which concatenates scores).
    Output 0: kept-box indices, output 1: number of kept boxes (length 1),
    output 2: scratch bitmask buffer used by the kernel.
    """
    __nr_inputs__ = 1
    __nr_outputs__ = 3
    def setup(self, iou_threshold, max_output):
        # NMS hyper-parameters serialized into the op (see get_serialize_params).
        self._iou_threshold = iou_threshold
        self._max_output = max_output
        # Load the necessary host device
        self._host_device = _so_lib.CreateHostDevice()
    def execute(self, inputs, outputs):
        """Forward the raw device tensor pointers to the CUDA kernel."""
        box_tensor_ptr = inputs[0].pubapi_dev_tensor_ptr
        output_tensor_ptr = outputs[0].pubapi_dev_tensor_ptr
        output_num_tensor_ptr = outputs[1].pubapi_dev_tensor_ptr
        mask_tensor_ptr = outputs[2].pubapi_dev_tensor_ptr
        _so_lib.NMSForwardGpu(
            box_tensor_ptr,
            mask_tensor_ptr,
            output_tensor_ptr,
            output_num_tensor_ptr,
            self._iou_threshold,
            self._max_output,
            self._host_device,
        )
    def grad(self, wrt_idx, inputs, outputs, out_grad):
        # NOTE(review): returns 0, i.e. no gradient flows through NMS —
        # presumably the craniotome convention for "no grad"; confirm.
        return 0
    def init_output_dtype(self, input_dtypes):
        # indices, count, and bitmask buffer are all int32.
        return [np.int32, np.int32, np.int32]
    def get_serialize_params(self):
        # Op type name plus packed (float iou_threshold, int max_output).
        return ("nms", struct.pack("fi", self._iou_threshold, self._max_output))
    def infer_shape(self, inp_shapes):
        nr_box = inp_shapes[0][0]
        threadsPerBlock = 64
        output_size = nr_box
        # here we compute the number of int32 used in mask_outputs.
        # In original version, we compute the bytes only.
        # One 64-bit word (8 bytes) per (box, thread-block) pair, expressed
        # in int32 units (hence the * 8 / 4).
        mask_size = int(
            nr_box
            * (nr_box // threadsPerBlock + int((nr_box % threadsPerBlock) > 0))
            * 8
            / 4
        )
        return [[output_size], [1], [mask_size]]
@wrap_io_tensor
def gpu_nms(box, iou_threshold, max_output):
    """Run the custom CUDA NMS op and return the indices of the kept boxes.

    ``box`` is the boxes-with-scores tensor consumed by NMSCran; only the
    first ``num`` entries of the index output are valid, so slice them off.
    """
    outputs = NMSCran.make(box, iou_threshold=iou_threshold, max_output=max_output)
    kept_indices, kept_count = outputs[0], outputs[1]  # third output (bitmask) unused
    return kept_indices[:kept_count]
def batched_nms(boxes, scores, idxs, iou_threshold, num_keep, use_offset=False):
    """Class-aware NMS: run one NMS pass over all classes at once.

    Boxes of different classes (``idxs``) are translated into disjoint
    coordinate ranges so they can never suppress each other.
    """
    num_boxes = boxes.shapeof(0)
    if use_offset:
        # Shrink each box by one pixel on the bottom/right edge.
        corner_shift = (
            mge.tensor([0, 0, 1, 1], device=boxes.device)
            .reshape(1, 4)
            .broadcast(num_boxes, 4)
        )
        boxes = boxes - corner_shift
    # Offset per class is larger than any coordinate, keeping classes disjoint.
    class_offsets = idxs * (boxes.max() + 1)
    shifted_boxes = boxes + class_offsets.reshape(-1, 1).broadcast(num_boxes, 4)
    dets = F.concat([shifted_boxes, scores.reshape(-1, 1)], axis=1)
    return gpu_nms(dets, iou_threshold, num_keep)
|
[
"megengine.tensor"
] |
[((380, 421), 'os.path.join', 'os.path.join', (['_current_path', '"""lib_nms.so"""'], {}), "(_current_path, 'lib_nms.so')\n", (392, 421), False, 'import os\n'), ((342, 367), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (357, 367), False, 'import os\n'), ((441, 462), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (452, 462), False, 'import ctypes\n'), ((597, 621), 'os.path.exists', 'os.path.exists', (['mge_path'], {}), '(mge_path)\n', (611, 621), False, 'import os\n'), ((675, 723), 'os.path.join', 'os.path.join', (['_current_path', '"""gpu_nms"""', '"""nms.cu"""'], {}), "(_current_path, 'gpu_nms', 'nms.cu')\n", (687, 723), False, 'import os\n'), ((735, 759), 'os.path.exists', 'os.path.exists', (['src_file'], {}), '(src_file)\n', (749, 759), False, 'import os\n'), ((958, 996), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (979, 996), False, 'import subprocess\n'), ((1011, 1032), 'ctypes.CDLL', 'ctypes.CDLL', (['_so_path'], {}), '(_so_path)\n', (1022, 1032), False, 'import ctypes\n'), ((531, 560), 'os.path.dirname', 'os.path.dirname', (['mge.__file__'], {}), '(mge.__file__)\n', (546, 560), False, 'import os\n'), ((2486, 2542), 'struct.pack', 'struct.pack', (['"""fi"""', 'self._iou_threshold', 'self._max_output'], {}), "('fi', self._iou_threshold, self._max_output)\n", (2497, 2542), False, 'import struct\n'), ((3330, 3375), 'megengine.tensor', 'mge.tensor', (['[0, 0, 1, 1]'], {'device': 'boxes.device'}), '([0, 0, 1, 1], device=boxes.device)\n', (3340, 3375), True, 'import megengine as mge\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2019 - present, Facebook, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Parameter
class FrozenBatchNorm2d(M.Module):
    """BatchNorm2d with immutable parameters.

    weight, bias, running_mean and running_var are fixed buffers; the
    forward pass only applies the resulting affine transform.
    """

    def __init__(self, num_features, eps=1e-5):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        # Affine parameters, frozen at identity scale / zero shift.
        self.weight = Parameter(np.ones(num_features, dtype=np.float32))
        self.bias = Parameter(np.zeros(num_features, dtype=np.float32))
        # Statistics are stored pre-broadcast as (1, C, 1, 1).
        self.running_mean = Parameter(np.zeros((1, num_features, 1, 1), dtype=np.float32))
        self.running_var = Parameter(np.ones((1, num_features, 1, 1), dtype=np.float32))

    def forward(self, x):
        inv_std = 1.0 / F.sqrt(self.running_var + self.eps)
        scale = self.weight.reshape(1, -1, 1, 1) * inv_std
        shift = self.bias.reshape(1, -1, 1, 1) - self.running_mean * scale
        # detach(): gradients must never flow into the frozen statistics.
        return x * scale.detach() + shift.detach()
class GroupNorm(M.Module):
    """Group Normalization: normalize activations per (sample, channel-group)."""

    def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
        super().__init__()
        self.num_groups = num_groups
        self.num_channels = num_channels
        self.eps = eps
        self.affine = affine
        if self.affine:
            self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
            self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
        else:
            self.weight = None
            self.bias = None
        self.reset_parameters()

    def reset_parameters(self):
        # Identity transform: unit scale, zero shift.
        if self.affine:
            M.init.ones_(self.weight)
            M.init.zeros_(self.bias)

    def forward(self, x):
        grouped = x.reshape(x.shape[0], self.num_groups, -1)
        mean = F.mean(grouped, axis=2, keepdims=True)
        # var = E[x^2] - E[x]^2, computed per (sample, group).
        var = F.mean(grouped ** 2, axis=2, keepdims=True) - mean * mean
        normalized = (grouped - mean) / F.sqrt(var + self.eps)
        normalized = normalized.reshape(x.shape)
        if not self.affine:
            return normalized
        return self.weight.reshape(1, -1, 1, 1) * normalized + \
            self.bias.reshape(1, -1, 1, 1)
def get_norm(norm):
    """Resolve a normalization-layer name to its class.

    Args:
        norm (str): one of "BN", "SyncBN", "FrozenBN", "GN", or None.

    Returns:
        M.Module subclass, or None when ``norm`` is None.
    """
    if norm is None:
        return None
    norm_classes = {
        "BN": M.BatchNorm2d,
        "SyncBN": M.SyncBatchNorm,
        "FrozenBN": FrozenBatchNorm2d,
        "GN": GroupNorm,
    }
    return norm_classes[norm]
|
[
"megengine.functional.mean",
"megengine.functional.sqrt",
"megengine.module.init.zeros_",
"megengine.module.init.ones_"
] |
[((3029, 3066), 'megengine.functional.mean', 'F.mean', (['output'], {'axis': '(2)', 'keepdims': '(True)'}), '(output, axis=2, keepdims=True)\n', (3035, 3066), True, 'import megengine.functional as F\n'), ((3083, 3125), 'megengine.functional.mean', 'F.mean', (['(output ** 2)'], {'axis': '(2)', 'keepdims': '(True)'}), '(output ** 2, axis=2, keepdims=True)\n', (3089, 3125), True, 'import megengine.functional as F\n'), ((1691, 1730), 'numpy.ones', 'np.ones', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1698, 1730), True, 'import numpy as np\n'), ((1762, 1802), 'numpy.zeros', 'np.zeros', (['num_features'], {'dtype': 'np.float32'}), '(num_features, dtype=np.float32)\n', (1770, 1802), True, 'import numpy as np\n'), ((1843, 1894), 'numpy.zeros', 'np.zeros', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1851, 1894), True, 'import numpy as np\n'), ((1933, 1983), 'numpy.ones', 'np.ones', (['(1, num_features, 1, 1)'], {'dtype': 'np.float32'}), '((1, num_features, 1, 1), dtype=np.float32)\n', (1940, 1983), True, 'import numpy as np\n'), ((2864, 2889), 'megengine.module.init.ones_', 'M.init.ones_', (['self.weight'], {}), '(self.weight)\n', (2876, 2889), True, 'import megengine.module as M\n'), ((2902, 2926), 'megengine.module.init.zeros_', 'M.init.zeros_', (['self.bias'], {}), '(self.bias)\n', (2915, 2926), True, 'import megengine.module as M\n'), ((3196, 3218), 'megengine.functional.sqrt', 'F.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (3202, 3218), True, 'import megengine.functional as F\n'), ((2083, 2118), 'megengine.functional.sqrt', 'F.sqrt', (['(self.running_var + self.eps)'], {}), '(self.running_var + self.eps)\n', (2089, 2118), True, 'import megengine.functional as F\n'), ((2572, 2611), 'numpy.ones', 'np.ones', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2579, 2611), True, 'import numpy as np\n'), ((2647, 2687), 
'numpy.zeros', 'np.zeros', (['num_channels'], {'dtype': 'np.float32'}), '(num_channels, dtype=np.float32)\n', (2655, 2687), True, 'import numpy as np\n')]
|
# BSD 3-Clause License
# Copyright (c) <NAME> 2016,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.module as M
__all__ = ['MobileNetV2', 'mobilenet_v2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class InvertedResidual(M.Module):
    """MobileNetV2 inverted residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # Residual shortcut only when the block preserves shape and channels.
        self.use_res_connect = self.stride == 1 and inp == oup
        stages = []
        if expand_ratio != 1:
            # pointwise expansion
            stages.append(M.ConvBnRelu2d(inp, hidden_dim, kernel_size=1, bias=False))
        # depthwise convolution
        stages.append(
            M.ConvBnRelu2d(hidden_dim, hidden_dim, kernel_size=3, padding=1,
                           stride=stride, groups=hidden_dim, bias=False)
        )
        # pointwise linear projection (no activation)
        stages.append(M.ConvBn2d(hidden_dim, oup, kernel_size=1, bias=False))
        self.conv = M.Sequential(*stages)
        self.add = M.Elemwise("ADD")

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = self.add(x, out)
        return out
class MobileNetV2(M.Module):
    """MobileNetV2 backbone + classifier, with quant/dequant stubs for QAT."""
    def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # t = expansion factor, c = output channels,
            # n = number of blocks, s = stride of the first block.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        # Head width never shrinks below 1280 even for width_mult < 1.
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [M.ConvBnRelu2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of a stage downsamples.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(M.ConvBnRelu2d(input_channel, self.last_channel, kernel_size=1, bias=False))
        # make it M.Sequential
        self.features = M.Sequential(*features)
        # building classifier
        self.classifier = M.Sequential(
            M.Dropout(0.2),
            M.Linear(self.last_channel, num_classes),
        )
        # Quantization entry/exit points around the float classifier.
        self.quant = M.QuantStub()
        self.dequant = M.DequantStub()
        # weight initialization
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    M.init.zeros_(m.bias)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
            elif isinstance(m, M.Linear):
                M.init.normal_(m.weight, 0, 0.01)
                M.init.zeros_(m.bias)
    def forward(self, x):
        # quant -> conv features -> 7x7 global average pool -> flatten
        # -> dequant -> dropout + linear classifier.
        # NOTE(review): avg_pool2d(x, 7) assumes a 224x224 input (7x7 final
        # feature map) — confirm for other input resolutions.
        x = self.quant(x)
        x = self.features(x)
        x = F.avg_pool2d(x, 7)
        x = F.flatten(x, 1)
        x = self.dequant(x)
        x = self.classifier(x)
        return x
def mobilenet_v2(**kwargs):
    """Build a MobileNetV2 model.

    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
    All keyword arguments are forwarded to :class:`MobileNetV2`.
    """
    return MobileNetV2(**kwargs)
|
[
"megengine.module.Elemwise",
"megengine.module.ConvBnRelu2d",
"megengine.module.Dropout",
"megengine.functional.flatten",
"megengine.module.DequantStub",
"megengine.module.Linear",
"megengine.module.ConvBn2d",
"megengine.module.init.msra_normal_",
"megengine.module.init.zeros_",
"megengine.module.init.normal_",
"megengine.module.init.ones_",
"megengine.functional.avg_pool2d",
"megengine.module.Sequential",
"megengine.module.QuantStub"
] |
[((3759, 3780), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3771, 3780), True, 'import megengine.module as M\n'), ((3800, 3817), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (3810, 3817), True, 'import megengine.module as M\n'), ((6265, 6288), 'megengine.module.Sequential', 'M.Sequential', (['*features'], {}), '(*features)\n', (6277, 6288), True, 'import megengine.module as M\n'), ((6474, 6487), 'megengine.module.QuantStub', 'M.QuantStub', ([], {}), '()\n', (6485, 6487), True, 'import megengine.module as M\n'), ((6511, 6526), 'megengine.module.DequantStub', 'M.DequantStub', ([], {}), '()\n', (6524, 6526), True, 'import megengine.module as M\n'), ((7124, 7142), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(7)'], {}), '(x, 7)\n', (7136, 7142), True, 'import megengine.functional as F\n'), ((7155, 7170), 'megengine.functional.flatten', 'F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (7164, 7170), True, 'import megengine.functional as F\n'), ((5599, 5684), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['(3)', 'input_channel'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': '(2)', 'bias': '(False)'}), '(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False\n )\n', (5613, 5684), True, 'import megengine.module as M\n'), ((6133, 6208), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['input_channel', 'self.last_channel'], {'kernel_size': '(1)', 'bias': '(False)'}), '(input_channel, self.last_channel, kernel_size=1, bias=False)\n', (6147, 6208), True, 'import megengine.module as M\n'), ((6372, 6386), 'megengine.module.Dropout', 'M.Dropout', (['(0.2)'], {}), '(0.2)\n', (6381, 6386), True, 'import megengine.module as M\n'), ((6400, 6440), 'megengine.module.Linear', 'M.Linear', (['self.last_channel', 'num_classes'], {}), '(self.last_channel, num_classes)\n', (6408, 6440), True, 'import megengine.module as M\n'), ((3385, 3443), 'megengine.module.ConvBnRelu2d', 
'M.ConvBnRelu2d', (['inp', 'hidden_dim'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inp, hidden_dim, kernel_size=1, bias=False)\n', (3399, 3443), True, 'import megengine.module as M\n'), ((3498, 3613), 'megengine.module.ConvBnRelu2d', 'M.ConvBnRelu2d', (['hidden_dim', 'hidden_dim'], {'kernel_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'groups': 'hidden_dim', 'bias': '(False)'}), '(hidden_dim, hidden_dim, kernel_size=3, padding=1, stride=\n stride, groups=hidden_dim, bias=False)\n', (3512, 3613), True, 'import megengine.module as M\n'), ((3673, 3727), 'megengine.module.ConvBn2d', 'M.ConvBn2d', (['hidden_dim', 'oup'], {'kernel_size': '(1)', 'bias': '(False)'}), '(hidden_dim, oup, kernel_size=1, bias=False)\n', (3683, 3727), True, 'import megengine.module as M\n'), ((6649, 6694), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (6668, 6694), True, 'import megengine.module as M\n'), ((6754, 6775), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6767, 6775), True, 'import megengine.module as M\n'), ((6839, 6861), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (6851, 6861), True, 'import megengine.module as M\n'), ((6878, 6899), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6891, 6899), True, 'import megengine.module as M\n'), ((6958, 6991), 'megengine.module.init.normal_', 'M.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (6972, 6991), True, 'import megengine.module as M\n'), ((7008, 7029), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (7021, 7029), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
class Matcher:
    """Assign each anchor a matched GT index and a label from IoU thresholds."""

    def __init__(self, thresholds, labels, allow_low_quality_matches=False):
        assert len(thresholds) + 1 == len(labels), "thresholds and labels are not matched"
        assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:]))
        # Pad with -inf / +inf sentinels so every score falls in exactly one interval.
        thresholds.append(float("inf"))
        thresholds.insert(0, -float("inf"))
        self.thresholds = thresholds
        self.labels = labels
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, matrix):
        """
        matrix(tensor): A two dim tensor with shape of (N, M). N is number of GT-boxes,
        while M is the number of anchors in detection.
        """
        assert len(matrix.shape) == 2
        best_scores = matrix.max(axis=0)
        matched_gt = F.argmax(matrix, axis=0)
        # Default label is -1 (ignore); each threshold interval overwrites it.
        labels = F.full_like(matched_gt, -1)
        intervals = zip(self.labels, self.thresholds[:-1], self.thresholds[1:])
        for interval_label, low, high in intervals:
            in_interval = (best_scores >= low) & (best_scores < high)
            labels[in_interval] = interval_label
        if self.allow_low_quality_matches:
            # Any anchor that is the best match of some GT box becomes positive.
            is_best_for_some_gt = matrix == F.max(matrix, axis=1, keepdims=True)
            labels[is_best_for_some_gt.sum(axis=0) > 0] = 1
        return matched_gt, labels
|
[
"megengine.functional.max",
"megengine.functional.argmax",
"megengine.functional.full_like"
] |
[((1208, 1232), 'megengine.functional.argmax', 'F.argmax', (['matrix'], {'axis': '(0)'}), '(matrix, axis=0)\n', (1216, 1232), True, 'import megengine.functional as F\n'), ((1286, 1316), 'megengine.functional.full_like', 'F.full_like', (['match_indices', '(-1)'], {}), '(match_indices, -1)\n', (1297, 1316), True, 'import megengine.functional as F\n'), ((1579, 1615), 'megengine.functional.max', 'F.max', (['matrix'], {'axis': '(1)', 'keepdims': '(True)'}), '(matrix, axis=1, keepdims=True)\n', (1584, 1615), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import platform
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.amp as amp
import megengine.config as config
import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
import megengine.jit as jit
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.device import get_device_count
from megengine.module import LayerNorm
def test_where():
    """F.where must match np.where on 2-D and 1-D inputs, incl. inf/nan."""
    mask_a = np.array([[1, 0], [0, 1]], dtype=np.bool_)
    x_a = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    y_a = np.array([[5, 6], [7, 8]], dtype=np.float32)
    mask_b = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
    x_b = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    y_b = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
    opr_test(
        [{"input": [mask_a, x_a, y_a]}, {"input": [mask_b, x_b, y_b]}],
        F.where,
        ref_fn=np.where,
        test_trace=False,
    )
    # 1-D cases with all-true and all-false masks.
    mask_true = np.array([1, 1, 1], dtype=np.bool_)
    x_flat = np.array([1, 3, 2], dtype=np.float32)
    y_flat = np.array([5, 6, 9], dtype=np.float32)
    mask_false = np.array([0, 0, 0], dtype=np.bool_)
    opr_test(
        [
            {"input": [mask_true, x_flat, y_flat]},
            {"input": [mask_false, x_flat.copy(), y_flat.copy()]},
        ],
        F.where,
        ref_fn=np.where,
        test_trace=False,
    )
def test_dropout():
    """Dropout: zeroing pattern, grad/output agreement, and RNG-seed control."""
    from megengine.autodiff import GradManager
    from megengine.core._imperative_rt.ops import set_global_rng_seed
    def test_dropout_with_shape(shape, rate):
        # With all-ones input, dropout's forward output equals its gradient
        # w.r.t. the input (both are the scaled keep-mask).
        data = tensor(np.ones(shape, dtype=np.float32))
        gm = GradManager().attach([data])
        with gm:
            out = F.nn.dropout(data, rate, training=True)
            gm.backward(out, tensor(np.ones(shape, dtype=np.float32)))
        assert not out.numpy().all()  # at least one element was dropped
        np.testing.assert_allclose(out.numpy(), data.grad.numpy(), 1e-7, 1e-7)
    def test_multiple_dropout(shape, rate):
        # Three chained dropouts: the composed keep-mask appears identically
        # in the forward output and the input gradient.
        data = tensor(np.ones(shape, dtype=np.float32))
        gm = GradManager().attach([data])
        with gm:
            out1 = F.nn.dropout(data, rate, training=True)
            out2 = F.nn.dropout(out1, rate, training=True)
            out3 = F.nn.dropout(out2, rate, training=True)
            gm.backward(out3, tensor(np.ones(shape, dtype=np.float32)))
        np.testing.assert_allclose(out3.numpy(), data.grad.numpy(), 1e-7, 1e-7)
    def test_dropout_seed(shape, rate):
        # Same seed -> same mask after an equal number of draws; a different
        # seed (or successive calls) must produce a different mask.
        data = tensor(np.random.randn(*shape), dtype="float32")
        set_global_rng_seed(111)
        out1 = F.nn.dropout(data, rate, training=True)
        out2 = F.nn.dropout(data, rate, training=True)
        assert not (out1.numpy() == out2.numpy()).all()
        set_global_rng_seed(111)
        out3 = F.nn.dropout(data, rate, training=True)
        assert (out1.numpy() == out3.numpy()).all()
        set_global_rng_seed(222)
        out4 = F.nn.dropout(data, rate, training=True)
        assert not (out1.numpy() == out4.numpy()).all()
    test_dropout_with_shape([13, 17, 63, 21], 0.4)
    test_dropout_with_shape([16, 32, 64], 0.3)
    test_multiple_dropout([1024], 0.2)
    test_dropout_seed([16, 32], 0.2)
def test_matinv():
    """F.matinv must agree with np.linalg.inv on single and batched matrices."""
    single_shape = (5, 5)
    batched_shape = (3, 9, 9)
    single = np.random.random(single_shape).astype("float32")
    batched = np.random.random(batched_shape).astype("float32")
    # Add n*I so each matrix is diagonally dominant, hence well-conditioned.
    single += (np.eye(single_shape[0]) * single_shape[0]).astype("float32")
    batched += np.broadcast_to(
        (np.eye(batched_shape[1]) * batched_shape[1]).astype("float32"), batched_shape
    )
    opr_test(
        [{"input": single}, {"input": batched}],
        F.matinv,
        compare_fn=lambda x, y: np.testing.assert_allclose(x.numpy(), y, rtol=1e-4),
        ref_fn=np.linalg.inv,
    )
def test_matmul():
    """F.matmul vs np.matmul: vector/matrix combos, batching, transposes."""
    # Non-batched: vector @ vector, vector @ matrix, matrix @ matrix.
    shape1 = 3
    shape2 = 3
    shape3 = (3, 5)
    shape4 = (5, 6)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")
    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)
    # Batched: broadcasting a vector against batches, 3-D and 4-D operands.
    batch_size = 10
    shape1 = (2,)
    shape2 = (batch_size, 2, 3)
    shape3 = (batch_size, 3, 4)
    shape4 = (batch_size, 10, 4, 2)
    shape5 = (batch_size, 10, 2, 4)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")
    data5 = np.random.random(shape5).astype("float32")
    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
        {"input": [data4, data5]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)
    # transpose_b: reference swaps the last two axes of the right operand.
    opr_test(
        [{"input": [data1, data4]}],
        F.matmul,
        ref_fn=lambda x, y: np.matmul(x, y.transpose(0, 1, 3, 2)),
        transpose_b=True,
    )
    # transpose_a + transpose_b on 3-D batches.
    opr_test(
        [{"input": [data3, data2]}],
        F.matmul,
        ref_fn=lambda x, y: np.matmul(x.transpose(0, 2, 1), y.transpose(0, 2, 1)),
        transpose_a=True,
        transpose_b=True,
    )
@pytest.mark.parametrize(
    "shape_a, shape_b", [((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10)),],
)
@pytest.mark.parametrize("is_symbolic", [None, True, False])
def test_matmul_empty_tensor(shape_a, shape_b, is_symbolic):
    """matmul over a zero-length contraction axis must yield all zeros."""
    def matmul_fn(a, b):
        return F.matmul(a, b)
    if is_symbolic is not None:
        # Also exercise the traced (symbolic / imperative) execution paths.
        matmul_fn = jit.trace(symbolic=is_symbolic)(matmul_fn)
    lhs = tensor(np.random.randn(*shape_a))
    rhs = tensor(np.random.randn(*shape_b))
    # Traced functions are run three times to cover warm-up and cached runs;
    # the eager path only needs one run.
    runs = 1 if is_symbolic is None else 3
    for _ in range(runs):
        result = matmul_fn(lhs, rhs)
        assert np.all(result.numpy() == 0)
def test_interpolate():
    """F.vision.interpolate: linear mode values, batching, align_corners,
    and rejection of invalid shapes/scales."""
    def linear_interpolate():
        # 1-D linear upsample of [1, 2]: scale_factor=2.0 and explicit
        # size=4 must both give [1.0, 1.25, 1.75, 2.0].
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
        out = F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
        out2 = F.vision.interpolate(inp, 4, mode="linear")
        np.testing.assert_allclose(
            out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )
        np.testing.assert_allclose(
            out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )
    def many_batch_interpolate():
        # Explicit target size and equivalent scale_factor must agree (N=2).
        inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))
        out = F.vision.interpolate(inp, [4, 4])
        out2 = F.vision.interpolate(inp, scale_factor=2.0)
        np.testing.assert_allclose(out.numpy(), out2.numpy())
    def assign_corner_interpolate():
        # Same size/scale agreement with align_corners=True.
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
        out = F.vision.interpolate(inp, [4, 4], align_corners=True)
        out2 = F.vision.interpolate(inp, scale_factor=2.0, align_corners=True)
        np.testing.assert_allclose(out.numpy(), out2.numpy())
    def error_shape_linear_interpolate():
        # "linear" mode rejects 4-D input.
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
        with pytest.raises(ValueError):
            F.vision.interpolate(inp, scale_factor=2.0, mode="linear")
    def inappropriate_scale_linear_interpolate():
        # "linear" mode rejects a per-axis scale list.
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))
        with pytest.raises(ValueError):
            F.vision.interpolate(inp, scale_factor=[2.0, 3.0], mode="linear")
    linear_interpolate()
    many_batch_interpolate()
    assign_corner_interpolate()
    error_shape_linear_interpolate()
    inappropriate_scale_linear_interpolate()
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
def _gen_roi_inp():
    """Build a random (2, 32, 256, 256) feature map and 4 random RoIs.

    Each RoI row is 5 numbers; the first column looks like the batch index
    (two RoIs per image), the rest are box coordinates.
    """
    feature_map = np.random.randn(2, 32, 256, 256)
    boxes = np.zeros((4, 5))
    boxes[:, 0] = [0, 0, 1, 1]
    # First corner in [0, 100), second corner in [150, 250) — boxes are valid.
    boxes[:, 1:3] = np.random.rand(4, 2) * 100
    boxes[:, 3:] = np.random.rand(4, 2) * 100 + 150
    return tensor(feature_map), tensor(boxes)
def test_roi_align():
    """roi_align: output shape is (num_rois, C, *output_shape) and the input
    gradient has the input's shape."""
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
    output_shape = (7, 7)
    out_feat = F.vision.roi_align(
        inp_feat,
        rois,
        output_shape=output_shape,
        mode="average",
        spatial_scale=1.0 / 4,
        sample_points=2,
        aligned=True,
    )
    assert make_shape_tuple(out_feat.shape) == (
        rois.shape[0],
        inp_feat.shape[1],
        *output_shape,
    )
    # Backprop an all-ones gradient; only the shape is checked, not values.
    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def _gen_correlation(random=True, constant=1, image_shape=(2, 1, 160, 160)):
    """Return a pair of image tensors for the correlation tests.

    Random normal data by default; with ``random=False`` both images are
    filled with ``constant``.
    """
    if random:
        first = np.random.randn(*image_shape)
        second = np.random.randn(*image_shape)
    else:
        first = np.ones(image_shape) * constant
        second = np.ones(image_shape) * constant
    return tensor(first), tensor(second)
def test_correlation():
    """F.vision.correlation: gradient shape plus value checks on constant images."""
    # test case 0: check the grad shape
    data1, data2 = _gen_correlation()
    grad = Grad().wrt(data1, callback=_save_to(data1))
    out_feat = F.vision.correlation(
        data1,
        data2,
        kernel_size=5,
        max_displacement=4,
        stride1=2,
        stride2=2,
        pad_size=2,
        is_multiply=True,
    )
    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(data1.grad.shape) == make_shape_tuple(data1.shape)
    # test case 1: from https://github.com/NVIDIA/flownet2-pytorch/issues/194
    # 3x3 all-ones images, kernel covering the image: output sums to 1.
    data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
    out_feat = F.vision.correlation(
        data1,
        data2,
        kernel_size=3,
        max_displacement=0,
        stride1=1,
        stride2=1,
        pad_size=0,
        is_multiply=True,
    )
    assert abs(out_feat.sum() - 1) < 1e-9
    # test case 2: subtraction mode on identical images must give (near) zero.
    data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
    out_feat = F.vision.correlation(
        data1,
        data2,
        kernel_size=3,
        max_displacement=0,
        stride1=1,
        stride2=1,
        pad_size=0,
        is_multiply=False,
    )
    assert out_feat.sum() < 1e-9
    # test case 3: NOTE(review) — identical to case 2 above; possibly a
    # copy-paste that was meant to use a different configuration.
    data1, data2 = _gen_correlation(random=False, image_shape=(1, 1, 3, 3))
    out_feat = F.vision.correlation(
        data1,
        data2,
        kernel_size=3,
        max_displacement=0,
        stride1=1,
        stride2=1,
        pad_size=0,
        is_multiply=False,
    )
    assert out_feat.sum() < 1e-9
    # test case 4: constant images 2.0 vs 1.0 in subtraction mode — the mean
    # of the output is expected to be 1.
    data1, _ = _gen_correlation(
        random=False, image_shape=(1, 1, 220, 220), constant=2.0
    )
    _, data2 = _gen_correlation(
        random=False, image_shape=(1, 1, 220, 220), constant=1.0
    )
    out_feat = F.vision.correlation(
        data1,
        data2,
        kernel_size=3,
        max_displacement=2,
        stride1=1,
        stride2=2,
        pad_size=0,
        is_multiply=False,
    )
    assert abs(out_feat.mean() - 1) < 1e-9
def test_roi_pooling():
    """Max-mode roi_pooling: output shape and backward gradient shape."""
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))
    pooled_shape = (7, 7)
    pooled = F.vision.roi_pooling(
        inp_feat, rois, output_shape=pooled_shape, mode="max", scale=1.0 / 4,
    )
    # one pooled window per roi, channels preserved
    expected_shape = (rois.shape[0], inp_feat.shape[1], *pooled_shape)
    assert make_shape_tuple(pooled.shape) == expected_shape
    grad(pooled, tensor(F.ones_like(pooled)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)
def test_adaptive_avg_pool2d():
    """adaptive_avg_pool2d on a 4x4 ramp: forward values and backward grad."""
    inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
    target = (2, 2)
    grad = Grad().wrt(inp, callback=_save_to(inp))
    outp = F.adaptive_avg_pool2d(inp, target,)
    assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *target,)
    # each output cell is the mean of a 2x2 window of the input ramp
    np.testing.assert_equal(
        outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)
    )
    grad(outp, tensor(F.ones_like(outp)))
    assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
    # the incoming gradient of 1 is spread evenly over each 2x2 window
    np.testing.assert_equal(
        inp.grad.numpy(), np.full((1, 1, 4, 4), 0.25, dtype=np.float32)
    )
def test_adaptive_max_pool2d():
    """adaptive_max_pool2d on a 4x4 ramp: forward values and backward grad."""
    inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
    target = (2, 2)
    grad = Grad().wrt(inp, callback=_save_to(inp))
    outp = F.adaptive_max_pool2d(inp, target,)
    assert make_shape_tuple(outp.shape) == (inp.shape[0], inp.shape[1], *target,)
    # the ramp's maxima sit in the bottom-right corner of each 2x2 window
    np.testing.assert_equal(
        outp.numpy(), np.array([[[[5, 7], [13, 15]]]], dtype=np.float32)
    )
    grad(outp, tensor(F.ones_like(outp)))
    assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
    # gradient flows only to the argmax position of each window
    expected_grad = np.zeros((1, 1, 4, 4), dtype=np.float32)
    expected_grad[0, 0, [1, 1, 3, 3], [1, 3, 1, 3]] = 1.0
    np.testing.assert_equal(inp.grad.numpy(), expected_grad)
def test_one_hot():
    """F.one_hot against numpy identity-matrix indexing, 1-d and 2-d input."""
    # low dimension: 1-d index vector
    low = np.arange(1, 4, dtype=np.int32)
    out = F.one_hot(tensor(low), num_classes=4)
    np.testing.assert_allclose(out.numpy(), np.eye(4, dtype=np.int32)[low])
    # high dimension: 2-d index matrix
    high = np.array(
        [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
        dtype=np.int32,
    )
    out = F.one_hot(tensor(high), 10)
    np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[high])
def test_interpolate_fastpath():
    """Bilinear interpolate fast path: output shapes and two value checks."""
    # shape checks only
    shape_cases = [
        [(1, 1, 10, 10), (5, 5)],
        [(1, 3, 10, 10), (20, 20)],
        [(10, 1, 10, 10), (1, 1)],
        # [(10, 10, 1, 1), (10, 10)], # FIXME, it causes random CI failure
    ]
    for inp_shape, target_shape in shape_cases:
        x = tensor(np.random.randn(*inp_shape), dtype=np.float32)
        out = F.vision.interpolate(x, target_shape, mode="bilinear")
        assert out.shape[0] == x.shape[0] and out.shape[1] == x.shape[1]
        assert out.shape[2] == target_shape[0] and out.shape[3] == target_shape[1]
    # value check: interpolating a constant image keeps it constant
    x = tensor(np.ones((3, 3, 10, 10)), dtype=np.float32)
    out = F.vision.interpolate(x, (15, 5), mode="bilinear")
    np.testing.assert_equal(out.numpy(), np.ones((3, 3, 15, 5)).astype(np.float32))
    # value check: downscaling a column to a single pixel yields its mean
    src = np.arange(32)
    x = tensor(src).astype(np.float32).reshape(1, 1, 32, 1)
    out = F.vision.interpolate(x, (1, 1), mode="bilinear")
    np.testing.assert_equal(out.item(), src.mean())
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective(dt):
    """A (+1, +1) translation matrix samples the window shifted by one pixel."""
    x = tensor(np.arange(16, dtype=dt).reshape(1, 1, 4, 4))
    # homogeneous matrix for dst(h, w) = src(h + 1, w + 1)
    translate = np.array(
        [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
    )
    M = tensor(translate.reshape(1, 3, 3))
    outp = F.vision.warp_perspective(x, M, (2, 2))
    np.testing.assert_equal(outp.numpy(), np.array([[[[5, 6], [9, 10]]]], dtype=dt))
@pytest.mark.parametrize("dt", [np.float32, np.int8, np.uint8, np.float16])
def test_warp_perspective_mat_idx(dt):
    """mat_idx selects which batch sample each matrix is applied to."""
    x = tensor(np.arange(32, dtype=dt).reshape(2, 1, 4, 4))
    # homogeneous matrix for dst(h, w) = src(h + 1, w + 1)
    translate = np.array(
        [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
    )
    M = tensor(translate.reshape(1, 3, 3))
    # four copies of the same matrix, applied to samples 0, 1, 1, 0
    M = F.concat([M,] * 4, 0)
    outp = F.vision.warp_perspective(x, M, (2, 2), mat_idx=[0, 1, 1, 0])
    expected = np.array(
        [
            [[[5, 6], [9, 10]]],
            [[[21, 22], [25, 26]]],
            [[[21, 22], [25, 26]]],
            [[[5, 6], [9, 10]]],
        ],
        dtype=dt,
    )
    np.testing.assert_equal(outp.numpy(), expected)
def test_warp_affine():
    """warp_affine with 'wrap' border mode against precomputed values."""
    x = tensor(np.arange(27, dtype=np.float32).reshape(1, 3, 3, 3))
    weightv = [[[1.26666667, 0.6, -83.33333333], [-0.33333333, 1, 66.66666667]]]
    outp = F.vision.warp_affine(x, tensor(weightv), (2, 2), border_mode="wrap")
    expected = np.array(
        [
            [
                [[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]],
                [[18.75, 19.75, 20.75], [14.90625, 15.90625, 16.90625]],
            ]
        ],
        dtype=np.float32,
    )
    # reference values are only checked on non-CUDA devices
    if not is_cuda_available():
        np.testing.assert_almost_equal(outp.numpy(), expected, 5)
def test_remap():
    """remap gathers input pixels at the (x, y) coordinates given by map_xy."""
    inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
    coords = np.array(
        [[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32
    )
    map_xy = tensor(coords.reshape(1, 2, 2, 2))
    outp = F.vision.remap(inp, map_xy)
    np.testing.assert_equal(
        outp.numpy(), np.array([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)
    )
def test_binary_cross_entropy():
    """binary_cross_entropy with logits (default) and with probabilities."""

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        np.testing.assert_allclose(x.numpy(), y, atol=5e-4)

    # two fixed random cases; re-seed before each so values are reproducible
    np.random.seed(123)
    data1 = np.random.uniform(size=(2, 2)).astype(np.float32)
    label1 = np.random.uniform(size=(2, 2)).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)
    np.random.seed(123)
    data2 = np.random.uniform(size=(2, 3)).astype(np.float32)
    label2 = np.random.uniform(size=(2, 3)).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    # logits path (default with_logits=True)
    logit_cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(logit_cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)

    # probability path: pre-sigmoided inputs with with_logits=False must give
    # the same losses
    prob_cases = [
        {"input": [sigmoid(data1), label1], "output": expect1,},
        {"input": [sigmoid(data2), label2], "output": expect2,},
    ]
    opr_test(
        prob_cases,
        partial(F.nn.binary_cross_entropy, with_logits=False),
        compare_fn=compare_fn,
    )
def test_hinge_loss():
    """hinge_loss against a numpy reference, with L1 and L2 norms.

    Fixes the reference computation: the original wrote
    ``np.clip(0, np.inf, t)``, which passes ``np.clip``'s arguments in the
    wrong order (``a=0, a_min=np.inf, a_max=t``) and evaluates to ``t``
    itself rather than ``max(t, 0)``.  It only passed because the generated
    labels are always -1, making every margin positive.  The correct call is
    ``np.clip(t, 0, np.inf)``, which yields identical values for this data.
    """
    np.random.seed(123)

    def _make_cases(norm):
        # reference: mean over batch of sum over features of max(0, 1 - pred*label)
        cases = []
        for shape in [(2, 2), (2, 3)]:
            data = np.random.uniform(size=shape).astype(np.float32)
            # np.random.randint(0, 1) only yields 0, so label is always -1
            label = 2 * np.random.randint(0, 1, size=shape).astype(np.float32) - 1
            margin = np.clip(1 - data * label, 0, np.inf)
            if norm == "L2":
                margin = margin ** 2
            cases.append({"input": [data, label], "output": margin.sum(axis=1).mean()})
        return cases

    # case with L1 norm (default)
    opr_test(_make_cases("L1"), F.nn.hinge_loss)

    # cases with L2 norm
    def hinge_loss_with_l2_norm(pred, label):
        return F.nn.hinge_loss(pred, label, "L2")

    opr_test(_make_cases("L2"), hinge_loss_with_l2_norm)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_nms(is_symbolic):
    """NMS keeps the highest-scoring non-overlapping boxes; empty input works."""

    def fn(inp, scores):
        return F.vision.nms(
            inp,
            scores=scores,
            iou_thresh=0.5,
            max_output=None if is_symbolic is None else 4,
        )

    if is_symbolic is not None:
        fn = jit.trace(symbolic=is_symbolic)(fn)

    boxes = np.array(
        [
            [0, 0, 100, 100],
            [10, 10, 100, 100],
            [50, 50, 100, 100],
            [100, 100, 150, 150],
        ],
        dtype=np.float32,
    )
    inp = tensor(boxes)
    scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
    # run several times so a traced fn is exercised beyond its first execution
    for _ in range(3):
        result = fn(inp, scores=scores)
        np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))
    # degenerate case: no boxes at all
    inp = tensor(np.array([], dtype=np.float32,).reshape(0, 4))
    scores = tensor([], dtype=np.float32)
    for _ in range(3):
        result = fn(inp, scores=scores)
        np.testing.assert_equal(result.numpy(), np.array([], dtype=np.int32))
@pytest.mark.skipif(
    get_device_count("gpu") > 0, reason="cuda does not support nchw int8"
)
def test_conv_bias():
    """Quantized conv_bias_activation must match a float conv2d reference after
    the reference is round-tripped through the same output dtype."""
    inp_scale = 1.5
    w_scale = 2.5
    outp_scale = 1.5
    inp_dtype = dtype.qint8(inp_scale)
    w_dtype = dtype.qint8(w_scale)
    # bias scale is the product of input and weight scales (qint32 accumulator)
    b_dtype = dtype.qint32(inp_scale * w_scale)
    out_dtype = dtype.qint8(outp_scale)
    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
        nonlinear_mode="identity",
    ):
        # One configuration: quantize random float data, run the quantized op,
        # and compare against the float conv2d on the same (quantized) values.
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)
        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)
        # float copies of the already-quantized values are the reference inputs
        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")
        def convert_to_nchw4(var):
            # NCHW -> NCHW4: split channels into groups of 4 and move the
            # 4-sized chunk to the innermost dimension
            var = F.reshape(
                var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
            )
            var = F.transpose(var, (0, 1, 3, 4, 2))
            return var
        def run_conv2d(inp, w, b):
            # float reference path
            O = F.conv2d(
                inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
            )
            if nonlinear_mode == "relu":
                return F.relu(O)
            else:
                return O
        def run_conv_bias(inp, w, b, format="NCHW"):
            # quantized path; a zero bias is substituted when has_bias is False
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            if format == "NCHW4":
                inp = convert_to_nchw4(inp)
                w = convert_to_nchw4(w)
                b = convert_to_nchw4(b)
            return F.quantized.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )
        # on CUDA the int8 convolution runs in the NCHW4 layout
        format = "NCHW4" if is_cuda_available() else "NCHW"
        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        # round-trip the reference through out_dtype so both sides carry the
        # same quantization error
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
            "float32"
        )
        if format == "NCHW4":
            # undo the NCHW4 layout before comparing
            result = F.transpose(result, (0, 1, 4, 2, 3))
        expected = F.flatten(expected)
        result = F.flatten(result)
        np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "relu")
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "relu")
@pytest.mark.skipif(get_device_count("gpu") > 0, reason="no int8 algorithm on cuda")
def test_batch_conv_bias():
    """Quantized batch_conv_bias_activation (per-sample weight banks) must
    match a float conv2d reference round-tripped through the output dtype."""
    inp_scale = 1.5
    w_scale = 2.5
    outp_scale = 1.5
    inp_dtype = dtype.qint8(inp_scale)
    w_dtype = dtype.qint8(w_scale)
    # bias scale is the product of input and weight scales (qint32 accumulator)
    b_dtype = dtype.qint32(inp_scale * w_scale)
    out_dtype = dtype.qint8(outp_scale)
    def run(
        N, IC, OC, IH, IW, KH, KW, PH, PW, SH, SW, has_bias=True,
    ):
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        # batched weights: one (OC, IC, KH, KW) filter bank per sample
        w_v = np.random.normal(size=(N, OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)
        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)
        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)
        # float copies of the already-quantized values are the reference inputs
        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")
        def run_batch_conv_bias(inp, w, b):
            # a zero bias is substituted when has_bias is False
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            result = F.quantized.batch_conv_bias_activation(
                inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=out_dtype,
            )
            return result.astype("float32")
        # reference: plain conv2d with sample 0's weights; [0] picks the
        # matching sample (N is 1 in the only invocation below)
        expected = F.conv2d(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)[0]
        # round-trip through out_dtype so both sides share quantization error
        expected = expected.astype(out_dtype).astype("float32")
        expected = F.flatten(expected)
        result = run_batch_conv_bias(inp_int8, w_int8, b_int32)
        result = F.flatten(result)
        np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)
    run(1, 4, 4, 5, 5, 3, 3, 0, 0, 1, 1, True)
def test_conv2d_autocast():
    """conv2d under amp autocast equals the manually converted computation."""
    amp.enabled = True
    x = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float32)
    w = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float32)
    stride, padding, dilation, groups = (2, 2), (3, 3), (1, 1), 1
    out = F.conv2d(x, w, None, stride, padding, dilation, groups)
    amp.enabled = False
    # manual equivalent of autocast: float16 operands, float32 accumulation
    expected = F.conv2d(
        x.astype("float16"),
        w.astype("float16"),
        None,
        stride,
        padding,
        dilation,
        groups,
        compute_mode="float32",
    )
    assert out.dtype == np.float16
    assert expected.dtype == np.float16
    np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv2d_zero_stride_numpy_array():
    """conv2d must accept input backed by a zero-stride numpy array."""
    # np.newaxis produces a broadcast (zero-stride) leading axis
    raw = np.random.randn(3, 224, 224).astype(np.float32)[np.newaxis, :]
    inp = tensor(raw, dtype=np.float32)
    weight = tensor(np.random.randn(16, 3, 3, 3), dtype=np.float32)
    F.conv2d(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)
def test_conv3d_zero_stride_numpy_array():
    """conv3d must accept input backed by a zero-stride numpy array."""
    # np.newaxis produces a broadcast (zero-stride) leading axis
    raw = np.random.randn(3, 224, 224, 224).astype(np.float32)[np.newaxis, :]
    inp = tensor(raw, dtype=np.float32)
    weight = tensor(np.random.randn(16, 3, 3, 3, 3), dtype=np.float32)
    out = F.conv3d(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)
    out.numpy()  # force evaluation of the result
def test_conv1d():
    """conv1d on all-ones input and kernel: each output is the kernel volume."""
    inp = tensor(np.ones((2, 2, 4), dtype=np.float32))
    weight = tensor(np.ones((3, 2, 2), dtype=np.float32))
    out = F.conv1d(inp, weight, None, 2, 0, 1, 1)
    # sum over 2 input channels * 2 taps of ones = 4 everywhere
    np.testing.assert_equal(out.numpy(), np.full((2, 3, 2), 4, dtype=np.float32))
def test_batchnorm2d_autocast():
    """batch_norm under amp autocast equals the manually converted computation."""
    amp.enabled = True
    data_shape = (1, 3, 224, 224)
    param_shape = (1, 3, 1, 1)
    x = tensor(np.random.randn(*data_shape), dtype=np.float32)
    gamma = tensor(np.ones(param_shape, dtype=np.float32))
    beta = tensor(np.zeros(param_shape, dtype=np.float32))
    out = F.batch_norm(x, weight=gamma, bias=beta, training=True, inplace=False)
    amp.enabled = False
    # manual equivalent: float16 input with float32 compute mode
    expected = F.batch_norm(
        x.astype("float16"),
        weight=gamma,
        bias=beta,
        training=True,
        inplace=False,
        compute_mode="float32",
    )
    assert out.dtype == np.float16
    assert expected.dtype == np.float16
    np.testing.assert_allclose(out.numpy(), expected.numpy())
def test_conv3d():
    """conv3d on all-ones input and kernel: each output is the kernel volume."""
    inp = tensor(np.ones((2, 2, 4, 4, 4), dtype=np.float32))
    weight = tensor(np.ones((3, 2, 2, 2, 2), dtype=np.float32))
    out = F.conv3d(inp, weight, None, 2, 0, 1, 1)
    # sum over 2 channels * 2*2*2 taps of ones = 16 everywhere
    np.testing.assert_equal(
        out.numpy(), np.full((2, 3, 2, 2, 2), 16, dtype=np.float32)
    )
def test_condtake_basic():
    """cond_take returns the masked values and their flattened indices.

    NOTE: this test was previously also named ``test_condtake`` and was
    shadowed (never collected) by the parametrized ``test_condtake`` defined
    immediately below; renamed so both tests run.
    """
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[True, False, True], [False, True, True]])
    xx = tensor(x)
    yy = tensor(y)
    val, idx = F.cond_take(yy, xx)
    np.testing.assert_equal(val.numpy(), x[y])
    np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_condtake(is_symbolic):
    """cond_take against numpy, including empty and zero-sized shapes."""

    def fn(mask, data):
        return F.cond_take(mask, data)

    if is_symbolic is not None:
        fn = jit.trace(symbolic=is_symbolic)(fn)

    for shp in [(3, 3, 3), (0,), (3, 0, 3)]:
        raw = np.random.randn(*shp).astype("float32")
        raw_mask = raw > 0
        data = tensor(raw)
        mask = tensor(raw_mask)
        expected_vals = raw[raw_mask]
        expected_idx = raw_mask.flatten().nonzero()[0]
        # traced fns are run multiple times; the eager path runs once
        for i in range(3):
            vals, idx = fn(mask, data)
            np.testing.assert_equal(vals.numpy(), expected_vals)
            np.testing.assert_equal(idx.numpy(), expected_idx)
            if is_symbolic is None:
                break
def test_condtake_is_same():
    """Two parameterless CondTake ops compare equal."""
    assert builtin.CondTake() == builtin.CondTake()
def test_nms_is_same():
    """NMSKeep ops compare equal iff both iou_thresh and max_output match."""
    base = builtin.NMSKeep(0.7, 100)
    same = builtin.NMSKeep(0.7, 100)
    diff_thresh = builtin.NMSKeep(0.8, 100)
    diff_output = builtin.NMSKeep(0.7, 200)
    assert base == same
    assert base != diff_thresh
    assert base != diff_output
    assert diff_thresh != diff_output
def test_argmxx_on_inf():
    """argmax/argmin must return valid indices when every input is +/-inf."""

    def run_argmax():
        x = F.zeros((100, 100))
        x[:] = -float("inf")
        return F.argmax(x, axis=0)

    def run_argmin():
        x = F.zeros((100, 100))
        x[:] = float("inf")
        return F.argmin(x, axis=0)

    assert all(run_argmax() >= 0)
    assert all(run_argmin() >= 0)
def test_deformable_psroi_pooling():
    """Smoke test: deformable_psroi_pooling runs on random inputs."""
    inp = np.random.random((1, 256, 64, 64)).astype("float32")
    rois = np.random.random((1, 5)).astype("float32")
    trans = np.random.random((24, 2, 7, 7)).astype("float32")
    F.deformable_psroi_pooling(
        tensor(inp),
        tensor(rois),
        tensor(trans),
        False,  # no_trans
        7,  # part_size
        7,  # pooled_h
        7,  # pooled_w
        4,  # sample_per_part
        1.0 / 64,  # spatial_scale
        0.1,  # trans_std
    )
def test_cvt_color():
    """cvt_color RGB2GRAY/BGR2GRAY against the same luma weights, reversed
    per channel order."""
    weights = [0.299, 0.587, 0.114]
    inp = np.random.randn(3, 3, 3, 3).astype(np.float32)
    x = tensor(inp)
    # RGB: weights applied in natural order
    ref_rgb = np.expand_dims(np.dot(inp[..., :3], weights), 3).astype(np.float32)
    y = F.vision.cvt_color(x, mode="RGB2GRAY")
    np.testing.assert_allclose(y.numpy(), ref_rgb, atol=1e-5)
    # BGR: same weights, reversed channel order
    ref_bgr = np.expand_dims(np.dot(inp[..., :3], weights[::-1]), 3).astype(np.float32)
    y1 = F.vision.cvt_color(x, mode="BGR2GRAY")
    np.testing.assert_allclose(y1.numpy(), ref_bgr, atol=1e-5)
@pytest.mark.parametrize("val", [2, [2,], [2, 3]])
def test_ones(val):
    """F.ones accepts a shape given as a tensor and matches np.ones."""
    shape_as_tensor = tensor(val)
    shape_as_np = np.array(val)
    np.testing.assert_equal(F.ones(shape_as_tensor), np.ones(shape_as_np))
def test_assert_equal():
    """_assert_equal accepts values that differ only within tolerance."""
    shape = (2, 3, 4, 5)
    lhs = F.ones(shape, dtype=np.float32)
    rhs = F.zeros(shape, dtype=np.float32) + 1.00001
    F.utils._assert_equal(lhs, rhs)
def test_assert_not_equal():
    """_assert_equal raises RuntimeError for values outside tolerance."""
    shape = (2, 3, 4, 5)
    lhs = F.ones(shape, dtype=np.float32)
    rhs = F.zeros(shape, dtype=np.float32) + 1.1
    with pytest.raises(RuntimeError):
        F.utils._assert_equal(lhs, rhs)
def test_neg_axis():
    """Negative axes to argmax/argmin behave like their positive counterparts."""
    x = tensor(np.random.normal(0, 1, (32, 5)))
    # single negative axis
    np.testing.assert_equal(
        F.argmax(x, axis=-1).numpy(), F.argmax(x, axis=1).numpy()
    )
    # tuple of negative axes
    np.testing.assert_equal(
        F.argmax(x, axis=(-1, -2)).numpy(), F.argmax(x, axis=(0, 1)).numpy()
    )
    np.testing.assert_equal(
        F.argmin(x, axis=(-1, -2)).numpy(), F.argmin(x, axis=(0, 1)).numpy()
    )
def test_sliding_window():
    """sliding_window against a naive numpy gather of every window."""
    N, C, H, W = 2, 3, 7, 8
    inp = np.random.normal(size=(N, C, H, W))
    ph, pw = 1, 2  # padding
    sh, sw = 2, 1  # stride
    wh, ww = 3, 2  # window size
    dh, dw = 1, 3  # dilation
    # conv-style output size: (i + 2p - (w - 1) * d - 1) // s + 1
    s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
    # zero-pad the input explicitly so windows can be sliced directly
    inp_pad = np.zeros((N, C, H + ph * 2, W + pw * 2))
    inp_pad[:, :, ph : H + ph, pw : W + pw] = inp
    gt_out = np.empty(
        (N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww), dtype=np.float32
    )
    # gather each dilated (wh, ww) window from the padded input
    for n, c, oh, ow in itertools.product(*map(range, gt_out.shape[:4])):
        ih, iw = oh * sh, ow * sw
        gt_out[n, c, oh, ow, :] = inp_pad[
            n, c, ih : ih + (wh - 1) * dh + 1 : dh, iw : iw + (ww - 1) * dw + 1 : dw
        ]
    out = F.sliding_window(
        tensor(inp), (wh, ww), padding=(ph, pw), stride=(sh, sw), dilation=(dh, dw)
    )
    np.testing.assert_equal(gt_out, out.numpy())
def test_sliding_window_transpose():
    """sliding_window_transpose (scatter-add of windows) against a numpy loop."""
    N, C, H, W = 2, 3, 7, 8
    ph, pw = 1, 2  # padding
    sh, sw = 2, 1  # stride
    wh, ww = 3, 2  # window size
    dh, dw = 1, 3  # dilation
    # conv-style output size: (i + 2p - (w - 1) * d - 1) // s + 1
    s = lambda i, p, s, d, w: (i + p * 2 - (w - 1) * d - 1) // s + 1
    inp = np.random.normal(
        size=(N, C, s(H, ph, sh, dh, wh), s(W, pw, sw, dw, ww), wh, ww)
    ).astype(np.float32)
    gt_out = np.zeros((N, C, H, W), dtype=np.float32)
    # accumulate every window element back onto its source pixel (the inverse
    # of sliding_window); positions that fall into the padding are dropped
    for n, c in itertools.product(*map(range, inp.shape[:2])):
        oh = 0
        for ih in range(-ph, H + ph - dh * (wh - 1), sh):
            ow = 0
            for iw in range(-pw, W + pw - dw * (ww - 1), sw):
                for kh, kw in itertools.product(*map(range, inp.shape[-2:])):
                    ih2 = ih + dh * kh
                    iw2 = iw + dw * kw
                    if ih2 >= 0 and ih2 < H and iw2 >= 0 and iw2 < W:
                        gt_out[n, c, ih2, iw2] += inp[n, c, oh, ow, kh, kw]
                ow += 1
            oh += 1
    out = F.sliding_window_transpose(
        tensor(inp),
        (H, W),
        (wh, ww),
        padding=(ph, pw),
        stride=(sh, sw),
        dilation=(dh, dw),
    )
    np.testing.assert_equal(gt_out, out.numpy())
def test_pad():
    """F.nn.pad modes (CONSTANT / EDGE / REFLECT) against np.pad."""
    src = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
    pad_width = ((2, 2), (2, 2))
    np.testing.assert_allclose(
        F.nn.pad(tensor(src), pad_width, "CONSTANT"),
        np.pad(src, pad_width, "constant"),
        atol=1e-5,
    )
    # constant padding with a non-zero fill value
    np.testing.assert_allclose(
        F.nn.pad(tensor(src), pad_width, "CONSTANT", constant_value=3),
        np.pad(src, pad_width, "constant", constant_values=3),
        atol=1e-5,
    )
    np.testing.assert_allclose(
        F.nn.pad(tensor(src), pad_width, "EDGE"),
        np.pad(src, pad_width, "edge"),
        atol=1e-5,
    )
    np.testing.assert_allclose(
        F.nn.pad(tensor(src), pad_width, "REFLECT"),
        np.pad(src, pad_width, "reflect"),
        atol=1e-5,
    )
def pixel_shuffle(data, r):
    """Numpy reference implementation of pixel shuffle.

    Rearranges a (..., C, H, W) array into (..., C // r**2, H * r, W * r) by
    moving r*r channel sub-blocks into spatial positions.  A 3-d input yields
    a 3-d output (the implicit batch dimension is stripped again).  The
    result is a float64 array regardless of the input dtype.
    """
    lead_shape = data.shape[:-3]
    flat = data.reshape(-1, data.shape[-3], data.shape[-2], data.shape[-1])
    batch, channels, height, width = flat.shape
    out_channels = int(channels / (r * r))
    res = np.zeros((batch, out_channels, height * r, width * r))
    positions = itertools.product(
        range(batch), range(channels), range(height), range(width)
    )
    for b, c, h, w in positions:
        # channel c encodes (out_channel, row_offset, col_offset) in base r
        res[
            b,
            int(c / r / r),
            h * r + int(c % (r * r) / r),
            w * r + c % r,
        ] = flat[b, c, h, w]
    if len(lead_shape) > 0:
        return res.reshape((*lead_shape, out_channels, height * r, width * r))
    return res[0]
def test_pixel_shuffle():
    """F.pixel_shuffle against the numpy reference for ndim 3 through 7."""
    cases = [
        ((16, 3, 3), 4),  # ndim = 3
        ((3, 18, 3, 3), 3),  # ndim = 4
        ((5, 3, 20, 3, 4), 2),  # ndim = 5
        ((6, 5, 3, 25, 3, 4), 5),  # ndim = 6
        ((2, 3, 5, 3, 20, 3, 4), 2),  # ndim = 7
    ]
    for shape, upscale in cases:
        inp = np.arange(int(np.prod(shape))).reshape(shape)
        out = F.pixel_shuffle(tensor(inp), upscale_factor=upscale)
        np.testing.assert_equal(out.numpy(), pixel_shuffle(inp, upscale))
@pytest.mark.parametrize("is_symbolic", [False, True])
def test_pixel_shuffle_symbolic(is_symbolic):
    """Traced pixel_shuffle matches the numpy reference over repeated runs.

    The golden value is computed from the raw numpy array (as in
    ``test_pixel_shuffle``) instead of from the megengine tensor, so the
    numpy reference implementation only ever sees numpy data.
    """

    def fn(inp, upscale_factor):
        return F.pixel_shuffle(inp, upscale_factor=upscale_factor)

    if is_symbolic is not None:
        fn = jit.trace(symbolic=is_symbolic)(fn)

    inp_np = np.arange(3 * 4 * 5 * 5).reshape(3, 4, 5, 5)
    inp = tensor(inp_np)
    golden = pixel_shuffle(inp_np, 2)
    # run repeatedly so a traced function is exercised past compilation
    for _ in range(3):
        out = fn(inp, 2)
        np.testing.assert_equal(out.numpy(), golden)
        if is_symbolic is None:
            break
def test_set_conv2d_config():
    """check setting config by contextmanager is equal to manually converted result"""
    # global config assignment
    config._compute_mode = "float32"
    x = tensor(np.random.randn(1, 3, 224, 224), dtype=np.float16)
    w = tensor(np.random.randn(64, 3, 7, 7), dtype=np.float16)
    conv_args = (None, (2, 2), (3, 3), (1, 1), 1)
    config_out = F.conv2d(x, w, *conv_args)
    config._compute_mode = "default"
    # scoped override via context manager
    with config._override(compute_mode="float32"):
        context_out = F.conv2d(x, w, *conv_args)
    # explicit keyword argument is the reference
    expected = F.conv2d(x, w, *conv_args, compute_mode="float32",)
    np.testing.assert_allclose(config_out.numpy(), expected.numpy())
    np.testing.assert_allclose(context_out.numpy(), expected.numpy())
def test_set_warp_perspective_config():
    """Setting conv_format globally or via context equals the explicit
    format argument."""
    # global config assignment
    config._conv_format = "NHWC"
    inp = Tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
    M = Tensor(np.random.randn(3, 3), dtype=np.float32).reshape((1, 3, 3))
    config_out = F.vision.warp_perspective(inp, M, (2, 2))
    config._conv_format = "default"
    # scoped override via context manager
    with config._override(conv_format="NHWC"):
        context_out = F.vision.warp_perspective(inp, M, (2, 2))
    # explicit keyword argument is the reference
    expected = F.vision.warp_perspective(inp, M, (2, 2), format="NHWC")
    np.testing.assert_allclose(config_out.numpy(), expected.numpy())
    np.testing.assert_allclose(context_out.numpy(), expected.numpy())
@pytest.mark.parametrize("stride", [(1, 1)])
@pytest.mark.parametrize("padding", [(1, 1)])
@pytest.mark.parametrize("dilation", [(1, 1)])
@pytest.mark.parametrize("ksize", [(3, 3)])
@pytest.mark.parametrize("groups", [1, 2])
def test_local_conv2d(stride, padding, dilation, ksize, groups):
    """local_conv2d (untied, per-position weights) against a numpy reference.

    Fixes the misspelled reference parameter ``dialtion`` -> ``dilation``
    (local-only; the function is called positionally).
    """
    batch_size, in_channels, out_channels = 2, 4, 8
    input_height, input_width = 10, 10
    output_height = (input_height + padding[0] * 2 - ksize[0]) // stride[0] + 1
    output_width = (input_width + padding[1] * 2 - ksize[1]) // stride[1] + 1

    def local_conv2d_np(data, weight, stride, padding, dilation):
        # naive calculation use numpy
        # only test output_height == input_height, output_width == input_width
        # NOTE: padding/dilation are fixed at (1, 1) by the parametrization;
        # the reference hard-codes the (1, 1) pad below and ignores dilation.
        data = np.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
        expected = np.zeros(
            (batch_size, out_channels, output_height, output_width), dtype=np.float32,
        )
        ic_group_size = in_channels // groups
        oc_group_size = out_channels // groups
        for n, oc, oh, ow in itertools.product(
            *map(range, [batch_size, out_channels, output_height, output_width])
        ):
            ih, iw = oh * stride[0], ow * stride[1]
            g_id = oc // oc_group_size
            # each (oh, ow) position has its own (untied) filter
            expected[n, oc, ih, iw] = np.sum(
                data[
                    n,
                    g_id * ic_group_size : (g_id + 1) * ic_group_size,
                    ih : ih + ksize[0],
                    iw : iw + ksize[1],
                ]
                * weight[g_id, oh, ow, :, :, :, oc % oc_group_size]
            )
        return expected

    data = np.random.rand(batch_size, in_channels, input_height, input_width).astype(
        "float32"
    )
    # weight layout: (groups, OH, OW, IC_per_group, KH, KW, OC_per_group)
    weight = np.random.rand(
        groups,
        output_height,
        output_width,
        in_channels // groups,
        *ksize,
        out_channels // groups,
    ).astype("float32")
    output = F.local_conv2d(
        tensor(data),
        tensor(weight),
        None,
        stride=stride,
        padding=padding,
        dilation=dilation,
    )
    ref = local_conv2d_np(data, weight, stride, padding, dilation)
    np.testing.assert_almost_equal(output.numpy(), ref, 5)
|
[
"megengine.functional.conv3d",
"megengine.functional.pixel_shuffle",
"megengine.core.tensor.dtype.convert_to_qint32",
"megengine.config._override",
"megengine.functional.argmax",
"megengine.functional.adaptive_avg_pool2d",
"megengine.functional.argmin",
"megengine.jit.trace",
"megengine.functional.utils._assert_equal",
"megengine.functional.nn.hinge_loss",
"megengine.core.tensor.dtype.get_scale",
"megengine.functional.transpose",
"megengine.functional.conv2d",
"megengine.functional.conv1d",
"megengine.device.get_device_count",
"megengine.core.ops.builtin.NMSKeep",
"megengine.functional.zeros",
"megengine.tensor",
"megengine.functional.flatten",
"megengine.functional.concat",
"megengine.functional.adaptive_max_pool2d",
"megengine.functional.vision.cvt_color",
"megengine.functional.matmul",
"megengine.core._imperative_rt.ops.set_global_rng_seed",
"megengine.core.autodiff.grad.Grad",
"megengine.functional.vision.correlation",
"megengine.functional.vision.remap",
"megengine.functional.cond_take",
"megengine.functional.vision.nms",
"megengine.functional.vision.interpolate",
"megengine.core.tensor.dtype.convert_to_qint8",
"megengine.functional.ones_like",
"megengine.core.tensor.dtype.qint8",
"megengine.functional.batch_norm",
"megengine.is_cuda_available",
"megengine.functional.relu",
"megengine.functional.vision.roi_align",
"megengine.functional.quantized.batch_conv_bias_activation",
"megengine.functional.one_hot",
"megengine.core.tensor.utils.make_shape_tuple",
"megengine.functional.vision.warp_perspective",
"megengine.core.ops.builtin.CondTake",
"megengine.functional.ones",
"megengine.Parameter",
"megengine.autodiff.GradManager",
"megengine.core.tensor.dtype.qint32",
"megengine.functional.nn.dropout",
"megengine.functional.quantized.conv_bias_activation",
"megengine.functional.reshape",
"megengine.functional.vision.roi_pooling"
] |
[((6039, 6149), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape_a, shape_b"""', '[((0,), (0,)), ((10, 0), (0, 10)), ((3, 10, 0), (3, 0, 10))]'], {}), "('shape_a, shape_b', [((0,), (0,)), ((10, 0), (0, 10\n )), ((3, 10, 0), (3, 0, 10))])\n", (6062, 6149), False, 'import pytest\n'), ((6154, 6213), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, True, False]'], {}), "('is_symbolic', [None, True, False])\n", (6177, 6213), False, 'import pytest\n'), ((16189, 16263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt"""', '[np.float32, np.int8, np.uint8, np.float16]'], {}), "('dt', [np.float32, np.int8, np.uint8, np.float16])\n", (16212, 16263), False, 'import pytest\n'), ((16764, 16838), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt"""', '[np.float32, np.int8, np.uint8, np.float16]'], {}), "('dt', [np.float32, np.int8, np.uint8, np.float16])\n", (16787, 16838), False, 'import pytest\n'), ((20828, 20887), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (20851, 20887), False, 'import pytest\n'), ((30215, 30274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (30238, 30274), False, 'import pytest\n'), ((32970, 33018), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[2, [2], [2, 3]]'], {}), "('val', [2, [2], [2, 3]])\n", (32993, 33018), False, 'import pytest\n'), ((38668, 38721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[False, True]'], {}), "('is_symbolic', [False, True])\n", (38691, 38721), False, 'import pytest\n'), ((40667, 40710), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[(1, 1)]'], {}), "('stride', [(1, 1)])\n", (40690, 40710), False, 'import pytest\n'), ((40712, 40756), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""padding"""', '[(1, 1)]'], {}), "('padding', [(1, 1)])\n", (40735, 40756), False, 'import pytest\n'), ((40758, 40803), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dilation"""', '[(1, 1)]'], {}), "('dilation', [(1, 1)])\n", (40781, 40803), False, 'import pytest\n'), ((40805, 40847), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ksize"""', '[(3, 3)]'], {}), "('ksize', [(3, 3)])\n", (40828, 40847), False, 'import pytest\n'), ((40849, 40890), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""groups"""', '[1, 2]'], {}), "('groups', [1, 2])\n", (40872, 40890), False, 'import pytest\n'), ((1062, 1104), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {'dtype': 'np.bool_'}), '([[1, 0], [0, 1]], dtype=np.bool_)\n', (1070, 1104), True, 'import numpy as np\n'), ((1115, 1169), 'numpy.array', 'np.array', (['[[1, np.inf], [np.nan, 4]]'], {'dtype': 'np.float32'}), '([[1, np.inf], [np.nan, 4]], dtype=np.float32)\n', (1123, 1169), True, 'import numpy as np\n'), ((1180, 1224), 'numpy.array', 'np.array', (['[[5, 6], [7, 8]]'], {'dtype': 'np.float32'}), '([[5, 6], [7, 8]], dtype=np.float32)\n', (1188, 1224), True, 'import numpy as np\n'), ((1239, 1298), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 0, 0], [1, 1, 0]]'], {'dtype': 'np.bool_'}), '([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)\n', (1247, 1298), True, 'import numpy as np\n'), ((1309, 1380), 'numpy.array', 'np.array', (['[[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]]'], {'dtype': 'np.float32'}), '([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)\n', (1317, 1380), True, 'import numpy as np\n'), ((1391, 1452), 'numpy.array', 'np.array', (['[[5, 6, 9], [2, 7, 8], [2, 1, 9]]'], {'dtype': 'np.float32'}), '([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)\n', (1399, 1452), True, 'import numpy as np\n'), ((1556, 1615), 'utils.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where', 'test_trace': '(False)'}), '(cases, F.where, 
ref_fn=np.where, test_trace=False)\n', (1564, 1615), False, 'from utils import opr_test\n'), ((1630, 1665), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'np.bool_'}), '([1, 1, 1], dtype=np.bool_)\n', (1638, 1665), True, 'import numpy as np\n'), ((1676, 1713), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (1684, 1713), True, 'import numpy as np\n'), ((1724, 1761), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (1732, 1761), True, 'import numpy as np\n'), ((1776, 1811), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.bool_'}), '([0, 0, 0], dtype=np.bool_)\n', (1784, 1811), True, 'import numpy as np\n'), ((1822, 1859), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (1830, 1859), True, 'import numpy as np\n'), ((1870, 1907), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (1878, 1907), True, 'import numpy as np\n'), ((2011, 2070), 'utils.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where', 'test_trace': '(False)'}), '(cases, F.where, ref_fn=np.where, test_trace=False)\n', (2019, 2070), False, 'from utils import opr_test\n'), ((4953, 4996), 'utils.opr_test', 'opr_test', (['cases', 'F.matmul'], {'ref_fn': 'np.matmul'}), '(cases, F.matmul, ref_fn=np.matmul)\n', (4961, 4996), False, 'from utils import opr_test\n'), ((5612, 5655), 'utils.opr_test', 'opr_test', (['cases', 'F.matmul'], {'ref_fn': 'np.matmul'}), '(cases, F.matmul, ref_fn=np.matmul)\n', (5620, 5655), False, 'from utils import opr_test\n'), ((8551, 8583), 'numpy.random.randn', 'np.random.randn', (['(2)', '(32)', '(256)', '(256)'], {}), '(2, 32, 256, 256)\n', (8566, 8583), True, 'import numpy as np\n'), ((8595, 8611), 'numpy.zeros', 'np.zeros', (['(4, 5)'], {}), '((4, 5))\n', (8603, 8611), True, 'import numpy as np\n'), ((8755, 8771), 'megengine.tensor', 
'tensor', (['inp_feat'], {}), '(inp_feat)\n', (8761, 8771), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((8783, 8795), 'megengine.tensor', 'tensor', (['rois'], {}), '(rois)\n', (8789, 8795), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((8985, 9121), 'megengine.functional.vision.roi_align', 'F.vision.roi_align', (['inp_feat', 'rois'], {'output_shape': 'output_shape', 'mode': '"""average"""', 'spatial_scale': '(1.0 / 4)', 'sample_points': '(2)', 'aligned': '(True)'}), "(inp_feat, rois, output_shape=output_shape, mode=\n 'average', spatial_scale=1.0 / 4, sample_points=2, aligned=True)\n", (9003, 9121), True, 'import megengine.functional as F\n'), ((10119, 10244), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(5)', 'max_displacement': '(4)', 'stride1': '(2)', 'stride2': '(2)', 'pad_size': '(2)', 'is_multiply': '(True)'}), '(data1, data2, kernel_size=5, max_displacement=4,\n stride1=2, stride2=2, pad_size=2, is_multiply=True)\n', (10139, 10244), True, 'import megengine.functional as F\n'), ((10612, 10737), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(True)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=True)\n', (10632, 10737), True, 'import megengine.functional as F\n'), ((10986, 11112), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=False)\n', (11006, 11112), True, 'import megengine.functional as F\n'), ((11352, 11478), 
'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(0)', 'stride1': '(1)', 'stride2': '(1)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=0,\n stride1=1, stride2=1, pad_size=0, is_multiply=False)\n', (11372, 11478), True, 'import megengine.functional as F\n'), ((11840, 11966), 'megengine.functional.vision.correlation', 'F.vision.correlation', (['data1', 'data2'], {'kernel_size': '(3)', 'max_displacement': '(2)', 'stride1': '(1)', 'stride2': '(2)', 'pad_size': '(0)', 'is_multiply': '(False)'}), '(data1, data2, kernel_size=3, max_displacement=2,\n stride1=1, stride2=2, pad_size=0, is_multiply=False)\n', (11860, 11966), True, 'import megengine.functional as F\n'), ((12241, 12335), 'megengine.functional.vision.roi_pooling', 'F.vision.roi_pooling', (['inp_feat', 'rois'], {'output_shape': 'output_shape', 'mode': '"""max"""', 'scale': '(1.0 / 4)'}), "(inp_feat, rois, output_shape=output_shape, mode='max',\n scale=1.0 / 4)\n", (12261, 12335), True, 'import megengine.functional as F\n'), ((12798, 12830), 'megengine.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['inp', 'oshp'], {}), '(inp, oshp)\n', (12819, 12830), True, 'import megengine.functional as F\n'), ((13762, 13794), 'megengine.functional.adaptive_max_pool2d', 'F.adaptive_max_pool2d', (['inp', 'oshp'], {}), '(inp, oshp)\n', (13783, 13794), True, 'import megengine.functional as F\n'), ((15853, 15902), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', '(15, 5)'], {'mode': '"""bilinear"""'}), "(x, (15, 5), mode='bilinear')\n", (15873, 15902), True, 'import megengine.functional as F\n'), ((15999, 16012), 'numpy.arange', 'np.arange', (['(32)'], {}), '(32)\n', (16008, 16012), True, 'import numpy as np\n'), ((16084, 16132), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', '(1, 1)'], {'mode': '"""bilinear"""'}), "(x, (1, 1), 
mode='bilinear')\n", (16104, 16132), True, 'import megengine.functional as F\n'), ((16636, 16675), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['x', 'M', '(2, 2)'], {}), '(x, M, (2, 2))\n', (16661, 16675), True, 'import megengine.functional as F\n'), ((17216, 17236), 'megengine.functional.concat', 'F.concat', (['([M] * 4)', '(0)'], {}), '([M] * 4, 0)\n', (17224, 17236), True, 'import megengine.functional as F\n'), ((17249, 17310), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['x', 'M', '(2, 2)'], {'mat_idx': '[0, 1, 1, 0]'}), '(x, M, (2, 2), mat_idx=[0, 1, 1, 0])\n', (17274, 17310), True, 'import megengine.functional as F\n'), ((17895, 18042), 'numpy.array', 'np.array', (['[[[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]], [[18.75, 19.75, \n 20.75], [14.90625, 15.90625, 16.90625]]]]'], {'dtype': 'np.float32'}), '([[[[7.875, 8.875, 9.875], [8.90625, 9.90625, 10.90625]], [[18.75, \n 19.75, 20.75], [14.90625, 15.90625, 16.90625]]]], dtype=np.float32)\n', (17903, 18042), True, 'import numpy as np\n'), ((18544, 18571), 'megengine.functional.vision.remap', 'F.vision.remap', (['inp', 'map_xy'], {}), '(inp, map_xy)\n', (18558, 18571), True, 'import megengine.functional as F\n'), ((18972, 18991), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (18986, 18991), True, 'import numpy as np\n'), ((19142, 19178), 'numpy.array', 'np.array', (['[0.6361]'], {'dtype': 'np.float32'}), '([0.6361], dtype=np.float32)\n', (19150, 19178), True, 'import numpy as np\n'), ((19184, 19203), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (19198, 19203), True, 'import numpy as np\n'), ((19354, 19389), 'numpy.array', 'np.array', (['[0.675]'], {'dtype': 'np.float32'}), '([0.675], dtype=np.float32)\n', (19362, 19389), True, 'import numpy as np\n'), ((19528, 19593), 'utils.opr_test', 'opr_test', (['cases', 'F.nn.binary_cross_entropy'], {'compare_fn': 'compare_fn'}), '(cases, 
F.nn.binary_cross_entropy, compare_fn=compare_fn)\n', (19536, 19593), False, 'from utils import opr_test\n'), ((19903, 19922), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (19917, 19922), True, 'import numpy as np\n'), ((20283, 20315), 'utils.opr_test', 'opr_test', (['cases', 'F.nn.hinge_loss'], {}), '(cases, F.nn.hinge_loss)\n', (20291, 20315), False, 'from utils import opr_test\n'), ((20784, 20824), 'utils.opr_test', 'opr_test', (['cases', 'hinge_loss_with_l2_norm'], {}), '(cases, hinge_loss_with_l2_norm)\n', (20792, 20824), False, 'from utils import opr_test\n'), ((21201, 21314), 'numpy.array', 'np.array', (['[[0, 0, 100, 100], [10, 10, 100, 100], [50, 50, 100, 100], [100, 100, 150, 150]\n ]'], {'dtype': 'np.float32'}), '([[0, 0, 100, 100], [10, 10, 100, 100], [50, 50, 100, 100], [100, \n 100, 150, 150]], dtype=np.float32)\n', (21209, 21314), True, 'import numpy as np\n'), ((21402, 21411), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (21408, 21411), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21425, 21471), 'megengine.tensor', 'tensor', (['[0.5, 0.8, 0.9, 0.6]'], {'dtype': 'np.float32'}), '([0.5, 0.8, 0.9, 0.6], dtype=np.float32)\n', (21431, 21471), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21685, 21694), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (21691, 21694), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((21708, 21736), 'megengine.tensor', 'tensor', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (21714, 21736), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((22074, 22096), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (22085, 22096), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22111, 22131), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (22122, 22131), True, 
'import megengine.core.tensor.dtype as dtype\n'), ((22146, 22179), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (22158, 22179), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22196, 22219), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (22207, 22219), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25405, 25427), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (25416, 25427), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25442, 25462), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (25453, 25462), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25477, 25510), 'megengine.core.tensor.dtype.qint32', 'dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (25489, 25510), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25527, 25550), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (25538, 25550), True, 'import megengine.core.tensor.dtype as dtype\n'), ((27400, 27454), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (27408, 27454), True, 'import megengine.functional as F\n'), ((27960, 27989), 'megengine.tensor', 'tensor', (['inp'], {'dtype': 'np.float32'}), '(inp, dtype=np.float32)\n', (27966, 27989), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((28068, 28122), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (28076, 28122), True, 'import megengine.functional as F\n'), ((28271, 28300), 'megengine.tensor', 'tensor', (['inp'], {'dtype': 'np.float32'}), '(inp, dtype=np.float32)\n', (28277, 28300), 
False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((28382, 28445), 'megengine.functional.conv3d', 'F.conv3d', (['inp', 'weight', 'None', '(2, 2, 2)', '(3, 3, 3)', '(1, 1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2, 2), (3, 3, 3), (1, 1, 1), 1)\n', (28390, 28445), True, 'import megengine.functional as F\n'), ((28606, 28645), 'megengine.functional.conv1d', 'F.conv1d', (['inp', 'weight', 'None', '(2)', '(0)', '(1)', '(1)'], {}), '(inp, weight, None, 2, 0, 1, 1)\n', (28614, 28645), True, 'import megengine.functional as F\n'), ((29176, 29249), 'megengine.functional.batch_norm', 'F.batch_norm', (['inp'], {'weight': 'weight', 'bias': 'bias', 'training': '(True)', 'inplace': '(False)'}), '(inp, weight=weight, bias=bias, training=True, inplace=False)\n', (29188, 29249), True, 'import megengine.functional as F\n'), ((29754, 29793), 'megengine.functional.conv3d', 'F.conv3d', (['inp', 'weight', 'None', '(2)', '(0)', '(1)', '(1)'], {}), '(inp, weight, None, 2, 0, 1, 1)\n', (29762, 29793), True, 'import megengine.functional as F\n'), ((29929, 29961), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (29937, 29961), True, 'import numpy as np\n'), ((29970, 30022), 'numpy.array', 'np.array', (['[[True, False, True], [False, True, True]]'], {}), '([[True, False, True], [False, True, True]])\n', (29978, 30022), True, 'import numpy as np\n'), ((30032, 30041), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (30038, 30041), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30051, 30060), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (30057, 30060), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30076, 30095), 'megengine.functional.cond_take', 'F.cond_take', (['yy', 'xx'], {}), '(yy, xx)\n', (30087, 30095), True, 'import megengine.functional as F\n'), ((31046, 31064), 'megengine.core.ops.builtin.CondTake', 'builtin.CondTake', ([], {}), 
'()\n', (31062, 31064), True, 'import megengine.core.ops.builtin as builtin\n'), ((31075, 31093), 'megengine.core.ops.builtin.CondTake', 'builtin.CondTake', ([], {}), '()\n', (31091, 31093), True, 'import megengine.core.ops.builtin as builtin\n'), ((31152, 31177), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(100)'], {}), '(0.7, 100)\n', (31167, 31177), True, 'import megengine.core.ops.builtin as builtin\n'), ((31188, 31213), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(100)'], {}), '(0.7, 100)\n', (31203, 31213), True, 'import megengine.core.ops.builtin as builtin\n'), ((31224, 31249), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.8)', '(100)'], {}), '(0.8, 100)\n', (31239, 31249), True, 'import megengine.core.ops.builtin as builtin\n'), ((31260, 31285), 'megengine.core.ops.builtin.NMSKeep', 'builtin.NMSKeep', (['(0.7)', '(200)'], {}), '(0.7, 200)\n', (31275, 31285), True, 'import megengine.core.ops.builtin as builtin\n'), ((32678, 32689), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (32684, 32689), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32698, 32736), 'megengine.functional.vision.cvt_color', 'F.vision.cvt_color', (['x'], {'mode': '"""RGB2GRAY"""'}), "(x, mode='RGB2GRAY')\n", (32716, 32736), True, 'import megengine.functional as F\n'), ((32868, 32906), 'megengine.functional.vision.cvt_color', 'F.vision.cvt_color', (['x'], {'mode': '"""BGR2GRAY"""'}), "(x, mode='BGR2GRAY')\n", (32886, 32906), True, 'import megengine.functional as F\n'), ((33050, 33061), 'megengine.tensor', 'tensor', (['val'], {}), '(val)\n', (33056, 33061), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((33075, 33088), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (33083, 33088), True, 'import numpy as np\n'), ((33207, 33238), 'megengine.functional.ones', 'F.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33213, 
33238), True, 'import megengine.functional as F\n'), ((33298, 33325), 'megengine.functional.utils._assert_equal', 'F.utils._assert_equal', (['x', 'y'], {}), '(x, y)\n', (33319, 33325), True, 'import megengine.functional as F\n'), ((33390, 33421), 'megengine.functional.ones', 'F.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33396, 33421), True, 'import megengine.functional as F\n'), ((33627, 33647), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (33635, 33647), True, 'import megengine.functional as F\n'), ((33657, 33676), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (33665, 33676), True, 'import megengine.functional as F\n'), ((33737, 33763), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(-1, -2)'}), '(x, axis=(-1, -2))\n', (33745, 33763), True, 'import megengine.functional as F\n'), ((33773, 33797), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(0, 1)'}), '(x, axis=(0, 1))\n', (33781, 33797), True, 'import megengine.functional as F\n'), ((33858, 33884), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(-1, -2)'}), '(x, axis=(-1, -2))\n', (33866, 33884), True, 'import megengine.functional as F\n'), ((33894, 33918), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(0, 1)'}), '(x, axis=(0, 1))\n', (33902, 33918), True, 'import megengine.functional as F\n'), ((34037, 34072), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, C, H, W)'}), '(size=(N, C, H, W))\n', (34053, 34072), True, 'import numpy as np\n'), ((34228, 34268), 'numpy.zeros', 'np.zeros', (['(N, C, H + ph * 2, W + pw * 2)'], {}), '((N, C, H + ph * 2, W + pw * 2))\n', (34236, 34268), True, 'import numpy as np\n'), ((35193, 35233), 'numpy.zeros', 'np.zeros', (['(N, C, H, W)'], {'dtype': 'np.float32'}), '((N, C, H, W), dtype=np.float32)\n', (35201, 35233), True, 'import numpy as np\n'), ((36053, 36114), 'numpy.array', 'np.array', 
(['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {'dtype': 'np.float32'}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)\n', (36061, 36114), True, 'import numpy as np\n'), ((36125, 36166), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""constant"""'], {}), "(src, ((2, 2), (2, 2)), 'constant')\n", (36131, 36166), True, 'import numpy as np\n'), ((36233, 36281), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36259, 36281), True, 'import numpy as np\n'), ((36292, 36352), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""constant"""'], {'constant_values': '(3)'}), "(src, ((2, 2), (2, 2)), 'constant', constant_values=3)\n", (36298, 36352), True, 'import numpy as np\n'), ((36437, 36485), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36463, 36485), True, 'import numpy as np\n'), ((36496, 36533), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""edge"""'], {}), "(src, ((2, 2), (2, 2)), 'edge')\n", (36502, 36533), True, 'import numpy as np\n'), ((36596, 36644), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36622, 36644), True, 'import numpy as np\n'), ((36655, 36695), 'numpy.pad', 'np.pad', (['src', '((2, 2), (2, 2))', '"""reflect"""'], {}), "(src, ((2, 2), (2, 2)), 'reflect')\n", (36661, 36695), True, 'import numpy as np\n'), ((36761, 36809), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'dst'], {'atol': '(1e-05)'}), '(res, dst, atol=1e-05)\n', (36787, 36809), True, 'import numpy as np\n'), ((39509, 39563), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 3), (1, 1), 1)\n', (39517, 39563), True, 'import megengine.functional as F\n'), ((39744, 39822), 'megengine.functional.conv2d', 
'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {'compute_mode': '"""float32"""'}), "(inp, weight, None, (2, 2), (3, 3), (1, 1), 1, compute_mode='float32')\n", (39752, 39822), True, 'import megengine.functional as F\n'), ((40264, 40305), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {}), '(inp, M, (2, 2))\n', (40289, 40305), True, 'import megengine.functional as F\n'), ((40468, 40524), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {'format': '"""NHWC"""'}), "(inp, M, (2, 2), format='NHWC')\n", (40493, 40524), True, 'import megengine.functional as F\n'), ((3231, 3255), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(111)'], {}), '(111)\n', (3250, 3255), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3271, 3310), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3283, 3310), True, 'import megengine.functional as F\n'), ((3326, 3365), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3338, 3365), True, 'import megengine.functional as F\n'), ((3431, 3455), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(111)'], {}), '(111)\n', (3450, 3455), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3471, 3510), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3483, 3510), True, 'import megengine.functional as F\n'), ((3572, 3596), 'megengine.core._imperative_rt.ops.set_global_rng_seed', 'set_global_rng_seed', (['(222)'], {}), '(222)\n', (3591, 3596), False, 'from megengine.core._imperative_rt.ops import set_global_rng_seed\n'), ((3612, 3651), 
'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (3624, 3651), True, 'import megengine.functional as F\n'), ((6310, 6324), 'megengine.functional.matmul', 'F.matmul', (['a', 'b'], {}), '(a, b)\n', (6318, 6324), True, 'import megengine.functional as F\n'), ((6427, 6452), 'numpy.random.randn', 'np.random.randn', (['*shape_a'], {}), '(*shape_a)\n', (6442, 6452), True, 'import numpy as np\n'), ((6469, 6494), 'numpy.random.randn', 'np.random.randn', (['*shape_b'], {}), '(*shape_b)\n', (6484, 6494), True, 'import numpy as np\n'), ((6778, 6836), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'mode': '"""linear"""'}), "(inp, scale_factor=2.0, mode='linear')\n", (6798, 6836), True, 'import megengine.functional as F\n'), ((6852, 6895), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '(4)'], {'mode': '"""linear"""'}), "(inp, 4, mode='linear')\n", (6872, 6895), True, 'import megengine.functional as F\n'), ((7276, 7309), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '[4, 4]'], {}), '(inp, [4, 4])\n', (7296, 7309), True, 'import megengine.functional as F\n'), ((7325, 7368), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)'}), '(inp, scale_factor=2.0)\n', (7345, 7368), True, 'import megengine.functional as F\n'), ((7561, 7614), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp', '[4, 4]'], {'align_corners': '(True)'}), '(inp, [4, 4], align_corners=True)\n', (7581, 7614), True, 'import megengine.functional as F\n'), ((7630, 7693), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'align_corners': '(True)'}), '(inp, scale_factor=2.0, align_corners=True)\n', (7650, 7693), True, 'import megengine.functional as F\n'), ((8661, 8681), 'numpy.random.rand', 
'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (8675, 8681), True, 'import numpy as np\n'), ((9191, 9223), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['out_feat.shape'], {}), '(out_feat.shape)\n', (9207, 9223), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((9370, 9407), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.grad.shape'], {}), '(inp_feat.grad.shape)\n', (9386, 9407), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((9411, 9443), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.shape'], {}), '(inp_feat.shape)\n', (9427, 9443), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((9558, 9637), 'numpy.random.randn', 'np.random.randn', (['image_shape[0]', 'image_shape[1]', 'image_shape[2]', 'image_shape[3]'], {}), '(image_shape[0], image_shape[1], image_shape[2], image_shape[3])\n', (9573, 9637), True, 'import numpy as np\n'), ((9680, 9759), 'numpy.random.randn', 'np.random.randn', (['image_shape[0]', 'image_shape[1]', 'image_shape[2]', 'image_shape[3]'], {}), '(image_shape[0], image_shape[1], image_shape[2], image_shape[3])\n', (9695, 9759), True, 'import numpy as np\n'), ((9908, 9925), 'megengine.tensor', 'tensor', (['inp_feat1'], {}), '(inp_feat1)\n', (9914, 9925), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((9927, 9944), 'megengine.tensor', 'tensor', (['inp_feat2'], {}), '(inp_feat2)\n', (9933, 9944), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((10374, 10408), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['data1.grad.shape'], {}), '(data1.grad.shape)\n', (10390, 10408), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((10412, 10441), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['data1.shape'], {}), '(data1.shape)\n', (10428, 10441), False, 'from 
megengine.core.tensor.utils import make_shape_tuple\n'), ((12358, 12390), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['out_feat.shape'], {}), '(out_feat.shape)\n', (12374, 12390), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12537, 12574), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.grad.shape'], {}), '(inp_feat.grad.shape)\n', (12553, 12574), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12578, 12610), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp_feat.shape'], {}), '(inp_feat.shape)\n', (12594, 12610), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12843, 12871), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['outp.shape'], {}), '(outp.shape)\n', (12859, 12871), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((12963, 13021), 'numpy.array', 'np.array', (['[[[[2.5, 4.5], [10.5, 12.5]]]]'], {'dtype': 'np.float32'}), '([[[[2.5, 4.5], [10.5, 12.5]]]], dtype=np.float32)\n', (12971, 13021), True, 'import numpy as np\n'), ((13082, 13114), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.grad.shape'], {}), '(inp.grad.shape)\n', (13098, 13114), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13118, 13145), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.shape'], {}), '(inp.shape)\n', (13134, 13145), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13209, 13349), 'numpy.array', 'np.array', (['[[[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, \n 0.25], [0.25, 0.25, 0.25, 0.25]]]]'], {'dtype': 'np.float32'}), '([[[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25,\n 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]]]], dtype=np.float32)\n', (13217, 13349), True, 'import numpy as np\n'), ((13807, 13835), 
'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['outp.shape'], {}), '(outp.shape)\n', (13823, 13835), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((13927, 13977), 'numpy.array', 'np.array', (['[[[[5, 7], [13, 15]]]]'], {'dtype': 'np.float32'}), '([[[[5, 7], [13, 15]]]], dtype=np.float32)\n', (13935, 13977), True, 'import numpy as np\n'), ((14038, 14070), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.grad.shape'], {}), '(inp.grad.shape)\n', (14054, 14070), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((14074, 14101), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['inp.shape'], {}), '(inp.shape)\n', (14090, 14101), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((14165, 14290), 'numpy.array', 'np.array', (['[[[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, \n 1.0, 0.0, 1.0]]]]'], {'dtype': 'np.float32'}), '([[[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0\n ], [0.0, 1.0, 0.0, 1.0]]]], dtype=np.float32)\n', (14173, 14290), True, 'import numpy as np\n'), ((14637, 14666), 'megengine.functional.one_hot', 'F.one_hot', (['inp'], {'num_classes': '(4)'}), '(inp, num_classes=4)\n', (14646, 14666), True, 'import megengine.functional as F\n'), ((14846, 14940), 'numpy.array', 'np.array', (['[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]]'], {'dtype': 'np.int32'}), '([[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],\n dtype=np.int32)\n', (14854, 14940), True, 'import numpy as np\n'), ((14987, 14998), 'megengine.tensor', 'tensor', (['arr'], {}), '(arr)\n', (14993, 14998), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((15013, 15031), 'megengine.functional.one_hot', 'F.one_hot', (['inp', '(10)'], {}), '(inp, 10)\n', (15022, 15031), True, 'import megengine.functional as F\n'), ((15555, 15609), 
'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x', 'target_shape'], {'mode': '"""bilinear"""'}), "(x, target_shape, mode='bilinear')\n", (15575, 15609), True, 'import megengine.functional as F\n'), ((15800, 15823), 'numpy.ones', 'np.ones', (['(3, 3, 10, 10)'], {}), '((3, 3, 10, 10))\n', (15807, 15823), True, 'import numpy as np\n'), ((16718, 16759), 'numpy.array', 'np.array', (['[[[[5, 6], [9, 10]]]]'], {'dtype': 'dt'}), '([[[[5, 6], [9, 10]]]], dtype=dt)\n', (16726, 16759), True, 'import numpy as np\n'), ((17370, 17485), 'numpy.array', 'np.array', (['[[[[5, 6], [9, 10]]], [[[21, 22], [25, 26]]], [[[21, 22], [25, 26]]], [[[5,\n 6], [9, 10]]]]'], {'dtype': 'dt'}), '([[[[5, 6], [9, 10]]], [[[21, 22], [25, 26]]], [[[21, 22], [25, 26]\n ]], [[[5, 6], [9, 10]]]], dtype=dt)\n', (17378, 17485), True, 'import numpy as np\n'), ((17840, 17855), 'megengine.tensor', 'tensor', (['weightv'], {}), '(weightv)\n', (17846, 17855), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((18141, 18160), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (18158, 18160), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((18623, 18679), 'numpy.array', 'np.array', (['[[[[1.0, 4.0], [4.0, 4.0]]]]'], {'dtype': 'np.float32'}), '([[[[1.0, 4.0], [4.0, 4.0]]]], dtype=np.float32)\n', (18631, 18679), True, 'import numpy as np\n'), ((19782, 19835), 'functools.partial', 'partial', (['F.nn.binary_cross_entropy'], {'with_logits': '(False)'}), '(F.nn.binary_cross_entropy, with_logits=False)\n', (19789, 19835), False, 'from functools import partial\n'), ((20744, 20778), 'megengine.functional.nn.hinge_loss', 'F.nn.hinge_loss', (['pred', 'label', '"""L2"""'], {}), "(pred, label, 'L2')\n", (20759, 20778), True, 'import megengine.functional as F\n'), ((20955, 21055), 'megengine.functional.vision.nms', 'F.vision.nms', (['inp'], {'scores': 'scores', 'iou_thresh': '(0.5)', 'max_output': '(None if is_symbolic is 
None else 4)'}), '(inp, scores=scores, iou_thresh=0.5, max_output=None if \n is_symbolic is None else 4)\n', (20967, 21055), True, 'import megengine.functional as F\n'), ((22446, 22484), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (22462, 22484), True, 'import numpy as np\n'), ((22499, 22538), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KH, KW)'}), '(size=(OC, IC, KH, KW))\n', (22515, 22538), True, 'import numpy as np\n'), ((22553, 22589), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (22569, 22589), True, 'import numpy as np\n'), ((22610, 22636), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (22625, 22636), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22655, 22679), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (22670, 22679), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22698, 22722), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (22713, 22722), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22739, 22791), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (22761, 22791), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22805, 22851), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (22827, 22851), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22865, 22912), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (22888, 22912), True, 'import megengine.core.tensor.dtype as dtype\n'), ((22933, 22962), 'megengine.tensor', 'tensor', (['inpv'], 
{'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (22939, 22962), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((22980, 23008), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (22989, 23008), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((23027, 23055), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (23036, 23055), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((24663, 24682), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (24672, 24682), True, 'import megengine.functional as F\n'), ((24700, 24717), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (24709, 24717), True, 'import megengine.functional as F\n'), ((21905, 21928), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (21921, 21928), False, 'from megengine.device import get_device_count\n'), ((25654, 25692), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (25670, 25692), True, 'import numpy as np\n'), ((25707, 25749), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, OC, IC, KH, KW)'}), '(size=(N, OC, IC, KH, KW))\n', (25723, 25749), True, 'import numpy as np\n'), ((25764, 25800), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (25780, 25800), True, 'import numpy as np\n'), ((25821, 25847), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (25836, 25847), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25866, 25890), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (25881, 25890), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25909, 25933), 
'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (25924, 25933), True, 'import megengine.core.tensor.dtype as dtype\n'), ((25950, 26002), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (25972, 26002), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26016, 26062), 'megengine.core.tensor.dtype.convert_to_qint8', 'dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (26038, 26062), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26076, 26123), 'megengine.core.tensor.dtype.convert_to_qint32', 'dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (26099, 26123), True, 'import megengine.core.tensor.dtype as dtype\n'), ((26144, 26173), 'megengine.tensor', 'tensor', (['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (26150, 26173), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26191, 26219), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (26200, 26219), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26238, 26266), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (26247, 26266), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((26879, 26898), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (26888, 26898), True, 'import megengine.functional as F\n'), ((26981, 26998), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (26990, 26998), True, 'import megengine.functional as F\n'), ((25237, 25260), 'megengine.device.get_device_count', 'get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (25253, 25260), False, 'from megengine.device import get_device_count\n'), ((27271, 
27302), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (27286, 27302), True, 'import numpy as np\n'), ((27342, 27370), 'numpy.random.randn', 'np.random.randn', (['(64)', '(3)', '(7)', '(7)'], {}), '(64, 3, 7, 7)\n', (27357, 27370), True, 'import numpy as np\n'), ((28010, 28038), 'numpy.random.randn', 'np.random.randn', (['(16)', '(3)', '(3)', '(3)'], {}), '(16, 3, 3, 3)\n', (28025, 28038), True, 'import numpy as np\n'), ((28321, 28352), 'numpy.random.randn', 'np.random.randn', (['(16)', '(3)', '(3)', '(3)', '(3)'], {}), '(16, 3, 3, 3, 3)\n', (28336, 28352), True, 'import numpy as np\n'), ((28500, 28536), 'numpy.ones', 'np.ones', (['(2, 2, 4)'], {'dtype': 'np.float32'}), '((2, 2, 4), dtype=np.float32)\n', (28507, 28536), True, 'import numpy as np\n'), ((28558, 28594), 'numpy.ones', 'np.ones', (['(3, 2, 2)'], {'dtype': 'np.float32'}), '((3, 2, 2), dtype=np.float32)\n', (28565, 28594), True, 'import numpy as np\n'), ((28704, 28789), 'numpy.array', 'np.array', (['[[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]]'], {'dtype': 'np.float32'}), '([[[4, 4], [4, 4], [4, 4]], [[4, 4], [4, 4], [4, 4]]], dtype=np.float32\n )\n', (28712, 28789), True, 'import numpy as np\n'), ((29012, 29036), 'numpy.random.randn', 'np.random.randn', (['*tshape'], {}), '(*tshape)\n', (29027, 29036), True, 'import numpy as np\n'), ((29076, 29109), 'numpy.ones', 'np.ones', (['pshape'], {'dtype': 'np.float32'}), '(pshape, dtype=np.float32)\n', (29083, 29109), True, 'import numpy as np\n'), ((29129, 29163), 'numpy.zeros', 'np.zeros', (['pshape'], {'dtype': 'np.float32'}), '(pshape, dtype=np.float32)\n', (29137, 29163), True, 'import numpy as np\n'), ((29636, 29678), 'numpy.ones', 'np.ones', (['(2, 2, 4, 4, 4)'], {'dtype': 'np.float32'}), '((2, 2, 4, 4, 4), dtype=np.float32)\n', (29643, 29678), True, 'import numpy as np\n'), ((29700, 29742), 'numpy.ones', 'np.ones', (['(3, 2, 2, 2, 2)'], {'dtype': 'np.float32'}), '((3, 2, 2, 2, 2), 
dtype=np.float32)\n', (29707, 29742), True, 'import numpy as np\n'), ((30420, 30443), 'megengine.functional.cond_take', 'F.cond_take', (['mask', 'data'], {}), '(mask, data)\n', (30431, 30443), True, 'import megengine.functional as F\n'), ((30644, 30656), 'megengine.tensor', 'tensor', (['x_np'], {}), '(x_np)\n', (30650, 30656), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((30672, 30687), 'megengine.tensor', 'tensor', (['mask_np'], {}), '(mask_np)\n', (30678, 30687), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((31436, 31455), 'megengine.functional.zeros', 'F.zeros', (['(100, 100)'], {}), '((100, 100))\n', (31443, 31455), True, 'import megengine.functional as F\n'), ((31500, 31519), 'megengine.functional.argmax', 'F.argmax', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (31508, 31519), True, 'import megengine.functional as F\n'), ((31575, 31594), 'megengine.functional.zeros', 'F.zeros', (['(100, 100)'], {}), '((100, 100))\n', (31582, 31594), True, 'import megengine.functional as F\n'), ((31638, 31657), 'megengine.functional.argmin', 'F.argmin', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (31646, 31657), True, 'import megengine.functional as F\n'), ((32157, 32168), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (32163, 32168), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32178, 32190), 'megengine.tensor', 'tensor', (['rois'], {}), '(rois)\n', (32184, 32190), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32200, 32213), 'megengine.tensor', 'tensor', (['trans'], {}), '(trans)\n', (32206, 32213), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((32423, 32466), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.114])\n', (32429, 32466), True, 'import numpy as np\n'), ((32506, 32549), 'numpy.dot', 'np.dot', (['bgr[..., :3]', '[0.114, 0.587, 
0.299]'], {}), '(bgr[..., :3], [0.114, 0.587, 0.299])\n', (32512, 32549), True, 'import numpy as np\n'), ((33117, 33128), 'megengine.functional.ones', 'F.ones', (['shp'], {}), '(shp)\n', (33123, 33128), True, 'import megengine.functional as F\n'), ((33130, 33145), 'numpy.ones', 'np.ones', (['np_shp'], {}), '(np_shp)\n', (33137, 33145), True, 'import numpy as np\n'), ((33247, 33279), 'megengine.functional.zeros', 'F.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33254, 33279), True, 'import megengine.functional as F\n'), ((33430, 33462), 'megengine.functional.zeros', 'F.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (33437, 33462), True, 'import megengine.functional as F\n'), ((33478, 33505), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (33491, 33505), False, 'import pytest\n'), ((33519, 33546), 'megengine.functional.utils._assert_equal', 'F.utils._assert_equal', (['x', 'y'], {}), '(x, y)\n', (33540, 33546), True, 'import megengine.functional as F\n'), ((33585, 33616), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(32, 5)'], {}), '(0, 1, (32, 5))\n', (33601, 33616), True, 'import numpy as np\n'), ((34716, 34727), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (34722, 34727), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((35845, 35856), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (35851, 35856), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36186, 36197), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36192, 36197), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36372, 36383), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36378, 36383), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36553, 36564), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36559, 36564), False, 'from 
megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((36715, 36726), 'megengine.tensor', 'tensor', (['src'], {}), '(src)\n', (36721, 36726), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((37652, 37663), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (37658, 37663), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((37866, 37877), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (37872, 37877), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38087, 38098), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38093, 38098), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38315, 38326), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38321, 38326), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38550, 38561), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (38556, 38561), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((38816, 38867), 'megengine.functional.pixel_shuffle', 'F.pixel_shuffle', (['inp'], {'upscale_factor': 'upscale_factor'}), '(inp, upscale_factor=upscale_factor)\n', (38831, 38867), True, 'import megengine.functional as F\n'), ((39373, 39404), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (39388, 39404), True, 'import numpy as np\n'), ((39444, 39472), 'numpy.random.randn', 'np.random.randn', (['(64)', '(3)', '(7)', '(7)'], {}), '(64, 3, 7, 7)\n', (39459, 39472), True, 'import numpy as np\n'), ((39610, 39650), 'megengine.config._override', 'config._override', ([], {'compute_mode': '"""float32"""'}), "(compute_mode='float32')\n", (39626, 39650), True, 'import megengine.config as config\n'), ((39674, 39728), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'weight', 'None', '(2, 2)', '(3, 3)', '(1, 1)', '(1)'], {}), '(inp, weight, None, (2, 2), (3, 
3), (1, 1), 1)\n', (39682, 39728), True, 'import megengine.functional as F\n'), ((40351, 40387), 'megengine.config._override', 'config._override', ([], {'conv_format': '"""NHWC"""'}), "(conv_format='NHWC')\n", (40367, 40387), True, 'import megengine.config as config\n'), ((40411, 40452), 'megengine.functional.vision.warp_perspective', 'F.vision.warp_perspective', (['inp', 'M', '(2, 2)'], {}), '(inp, M, (2, 2))\n', (40436, 40452), True, 'import megengine.functional as F\n'), ((41404, 41450), 'numpy.pad', 'np.pad', (['data', '((0, 0), (0, 0), (1, 1), (1, 1))'], {}), '(data, ((0, 0), (0, 0), (1, 1), (1, 1)))\n', (41410, 41450), True, 'import numpy as np\n'), ((41470, 41558), 'numpy.zeros', 'np.zeros', (['(batch_size, out_channels, output_height, output_width)'], {'dtype': 'np.float32'}), '((batch_size, out_channels, output_height, output_width), dtype=np.\n float32)\n', (41478, 41558), True, 'import numpy as np\n'), ((42608, 42620), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (42614, 42620), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((42630, 42644), 'megengine.tensor', 'tensor', (['weight'], {}), '(weight)\n', (42636, 42644), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((2279, 2311), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2286, 2311), True, 'import numpy as np\n'), ((2390, 2429), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (2402, 2429), True, 'import megengine.functional as F\n'), ((2692, 2724), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2699, 2724), True, 'import numpy as np\n'), ((2804, 2843), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['data', 'rate'], {'training': '(True)'}), '(data, rate, training=True)\n', (2816, 2843), True, 'import megengine.functional as F\n'), ((2863, 2902), 
'megengine.functional.nn.dropout', 'F.nn.dropout', (['out1', 'rate'], {'training': '(True)'}), '(out1, rate, training=True)\n', (2875, 2902), True, 'import megengine.functional as F\n'), ((2922, 2961), 'megengine.functional.nn.dropout', 'F.nn.dropout', (['out2', 'rate'], {'training': '(True)'}), '(out2, rate, training=True)\n', (2934, 2961), True, 'import megengine.functional as F\n'), ((3181, 3204), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3196, 3204), True, 'import numpy as np\n'), ((3959, 3983), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3975, 3983), True, 'import numpy as np\n'), ((4014, 4038), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4030, 4038), True, 'import numpy as np\n'), ((4615, 4639), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (4631, 4639), True, 'import numpy as np\n'), ((4670, 4694), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4686, 4694), True, 'import numpy as np\n'), ((4725, 4749), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (4741, 4749), True, 'import numpy as np\n'), ((4780, 4804), 'numpy.random.random', 'np.random.random', (['shape4'], {}), '(shape4)\n', (4796, 4804), True, 'import numpy as np\n'), ((5184, 5208), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (5200, 5208), True, 'import numpy as np\n'), ((5239, 5263), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (5255, 5263), True, 'import numpy as np\n'), ((5294, 5318), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (5310, 5318), True, 'import numpy as np\n'), ((5349, 5373), 'numpy.random.random', 'np.random.random', (['shape4'], {}), '(shape4)\n', (5365, 5373), True, 'import numpy as np\n'), ((5404, 5428), 'numpy.random.random', 'np.random.random', (['shape5'], {}), '(shape5)\n', (5420, 5428), True, 'import numpy as 
np\n'), ((6373, 6404), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (6382, 6404), True, 'import megengine.jit as jit\n'), ((6958, 7012), 'numpy.array', 'np.array', (['[[[1.0, 1.25, 1.75, 2.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)\n', (6966, 7012), True, 'import numpy as np\n'), ((7085, 7139), 'numpy.array', 'np.array', (['[[[1.0, 1.25, 1.75, 2.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)\n', (7093, 7139), True, 'import numpy as np\n'), ((7890, 7915), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7903, 7915), False, 'import pytest\n'), ((7929, 7987), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '(2.0)', 'mode': '"""linear"""'}), "(inp, scale_factor=2.0, mode='linear')\n", (7949, 7987), True, 'import megengine.functional as F\n'), ((8126, 8151), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8139, 8151), False, 'import pytest\n'), ((8165, 8230), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inp'], {'scale_factor': '[2.0, 3.0]', 'mode': '"""linear"""'}), "(inp, scale_factor=[2.0, 3.0], mode='linear')\n", (8185, 8230), True, 'import megengine.functional as F\n'), ((8706, 8726), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (8720, 8726), True, 'import numpy as np\n'), ((8893, 8899), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (8897, 8899), False, 'from megengine.core.autodiff.grad import Grad\n'), ((9335, 9356), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (9346, 9356), True, 'import megengine.functional as F\n'), ((9812, 9832), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (9819, 9832), True, 'import numpy as np\n'), ((9864, 9884), 'numpy.ones', 'np.ones', (['image_shape'], {}), '(image_shape)\n', (9871, 
9884), True, 'import numpy as np\n'), ((10059, 10065), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (10063, 10065), False, 'from megengine.core.autodiff.grad import Grad\n'), ((10339, 10360), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (10350, 10360), True, 'import megengine.functional as F\n'), ((12150, 12156), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (12154, 12156), False, 'from megengine.core.autodiff.grad import Grad\n'), ((12502, 12523), 'megengine.functional.ones_like', 'F.ones_like', (['out_feat'], {}), '(out_feat)\n', (12513, 12523), True, 'import megengine.functional as F\n'), ((12747, 12753), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (12751, 12753), False, 'from megengine.core.autodiff.grad import Grad\n'), ((13051, 13068), 'megengine.functional.ones_like', 'F.ones_like', (['outp'], {}), '(outp)\n', (13062, 13068), True, 'import megengine.functional as F\n'), ((13711, 13717), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (13715, 13717), False, 'from megengine.core.autodiff.grad import Grad\n'), ((14007, 14024), 'megengine.functional.ones_like', 'F.ones_like', (['outp'], {}), '(outp)\n', (14018, 14024), True, 'import megengine.functional as F\n'), ((14590, 14621), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (14599, 14621), True, 'import numpy as np\n'), ((15494, 15521), 'numpy.random.randn', 'np.random.randn', (['*inp_shape'], {}), '(*inp_shape)\n', (15509, 15521), True, 'import numpy as np\n'), ((19004, 19039), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data1_shape'}), '(size=data1_shape)\n', (19021, 19039), True, 'import numpy as np\n'), ((19072, 19108), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label1_shape'}), '(size=label1_shape)\n', (19089, 19108), True, 'import numpy as np\n'), ((19216, 19251), 'numpy.random.uniform', 'np.random.uniform', ([], 
{'size': 'data2_shape'}), '(size=data2_shape)\n', (19233, 19251), True, 'import numpy as np\n'), ((19284, 19320), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label2_shape'}), '(size=label2_shape)\n', (19301, 19320), True, 'import numpy as np\n'), ((21156, 21187), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (21165, 21187), True, 'import megengine.jit as jit\n'), ((21583, 21618), 'numpy.array', 'np.array', (['[2, 1, 3]'], {'dtype': 'np.int32'}), '([2, 1, 3], dtype=np.int32)\n', (21591, 21618), True, 'import numpy as np\n'), ((21629, 21659), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (21637, 21659), True, 'import numpy as np\n'), ((21848, 21876), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (21856, 21876), True, 'import numpy as np\n'), ((23242, 23327), 'megengine.functional.reshape', 'F.reshape', (['var', '(var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])'], {}), '(var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])\n )\n', (23251, 23327), True, 'import megengine.functional as F\n'), ((23371, 23404), 'megengine.functional.transpose', 'F.transpose', (['var', '(0, 1, 3, 4, 2)'], {}), '(var, (0, 1, 3, 4, 2))\n', (23382, 23404), True, 'import megengine.functional as F\n'), ((23480, 23556), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW))\n', (23488, 23556), True, 'import megengine.functional as F\n'), ((24007, 24137), 'megengine.functional.quantized.conv_bias_activation', 'F.quantized.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH,\n PW), dtype=out_dtype, 
nonlinear_mode=nonlinear_mode)\n', (24039, 24137), True, 'import megengine.functional as F\n'), ((24290, 24309), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (24307, 24309), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((24607, 24643), 'megengine.functional.transpose', 'F.transpose', (['result', '(0, 1, 4, 2, 3)'], {}), '(result, (0, 1, 4, 2, 3))\n', (24618, 24643), True, 'import megengine.functional as F\n'), ((26536, 26642), 'megengine.functional.quantized.batch_conv_bias_activation', 'F.quantized.batch_conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype'}), '(inp, w, b, stride=(SH, SW), padding=\n (PH, PW), dtype=out_dtype)\n', (26574, 26642), True, 'import megengine.functional as F\n'), ((26733, 26792), 'megengine.functional.conv2d', 'F.conv2d', (['inp_fp32', 'w_fp32[0]', '(b_fp32 if has_bias else None)'], {}), '(inp_fp32, w_fp32[0], b_fp32 if has_bias else None)\n', (26741, 26792), True, 'import megengine.functional as F\n'), ((27872, 27900), 'numpy.random.randn', 'np.random.randn', (['(3)', '(224)', '(224)'], {}), '(3, 224, 224)\n', (27887, 27900), True, 'import numpy as np\n'), ((28178, 28211), 'numpy.random.randn', 'np.random.randn', (['(3)', '(224)', '(224)', '(224)'], {}), '(3, 224, 224, 224)\n', (28193, 28211), True, 'import numpy as np\n'), ((29844, 29886), 'numpy.ones', 'np.ones', (['(2, 3, 2, 2, 2)'], {'dtype': 'np.float32'}), '((2, 3, 2, 2, 2), dtype=np.float32)\n', (29851, 29886), True, 'import numpy as np\n'), ((30490, 30521), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (30499, 30521), True, 'import megengine.jit as jit\n'), ((31796, 31830), 'numpy.random.random', 'np.random.random', (['(1, 256, 64, 64)'], {}), '((1, 256, 64, 64))\n', (31812, 31830), True, 'import numpy as np\n'), ((31860, 31884), 'numpy.random.random', 'np.random.random', (['(1, 5)'], {}), '((1, 5))\n', (31876, 
31884), True, 'import numpy as np\n'), ((31915, 31946), 'numpy.random.random', 'np.random.random', (['(24, 2, 7, 7)'], {}), '((24, 2, 7, 7))\n', (31931, 31946), True, 'import numpy as np\n'), ((32561, 32588), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)', '(3)', '(3)'], {}), '(3, 3, 3, 3)\n', (32576, 32588), True, 'import numpy as np\n'), ((37586, 37607), 'numpy.arange', 'np.arange', (['(16 * 3 * 3)'], {}), '(16 * 3 * 3)\n', (37595, 37607), True, 'import numpy as np\n'), ((37793, 37818), 'numpy.arange', 'np.arange', (['(3 * 18 * 3 * 3)'], {}), '(3 * 18 * 3 * 3)\n', (37802, 37818), True, 'import numpy as np\n'), ((38007, 38036), 'numpy.arange', 'np.arange', (['(5 * 3 * 20 * 3 * 4)'], {}), '(5 * 3 * 20 * 3 * 4)\n', (38016, 38036), True, 'import numpy as np\n'), ((38228, 38261), 'numpy.arange', 'np.arange', (['(6 * 5 * 3 * 25 * 3 * 4)'], {}), '(6 * 5 * 3 * 25 * 3 * 4)\n', (38237, 38261), True, 'import numpy as np\n'), ((38456, 38493), 'numpy.arange', 'np.arange', (['(2 * 3 * 5 * 3 * 20 * 3 * 4)'], {}), '(2 * 3 * 5 * 3 * 20 * 3 * 4)\n', (38465, 38493), True, 'import numpy as np\n'), ((38914, 38945), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (38923, 38945), True, 'import megengine.jit as jit\n'), ((41939, 42099), 'numpy.sum', 'np.sum', (['(data[n, g_id * ic_group_size:(g_id + 1) * ic_group_size, ih:ih + ksize[0],\n iw:iw + ksize[1]] * weight[g_id, oh, ow, :, :, :, oc % oc_group_size])'], {}), '(data[n, g_id * ic_group_size:(g_id + 1) * ic_group_size, ih:ih +\n ksize[0], iw:iw + ksize[1]] * weight[g_id, oh, ow, :, :, :, oc %\n oc_group_size])\n', (41945, 42099), True, 'import numpy as np\n'), ((42279, 42345), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'in_channels', 'input_height', 'input_width'], {}), '(batch_size, in_channels, input_height, input_width)\n', (42293, 42345), True, 'import numpy as np\n'), ((42391, 42501), 'numpy.random.rand', 'np.random.rand', (['groups', 'output_height', 
'output_width', '(in_channels // groups)', '*ksize', '(out_channels // groups)'], {}), '(groups, output_height, output_width, in_channels // groups,\n *ksize, out_channels // groups)\n', (42405, 42501), True, 'import numpy as np\n'), ((2326, 2339), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2337, 2339), False, 'from megengine.autodiff import GradManager\n'), ((2739, 2752), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2750, 2752), False, 'from megengine.autodiff import GradManager\n'), ((4133, 4150), 'numpy.eye', 'np.eye', (['shape1[0]'], {}), '(shape1[0])\n', (4139, 4150), True, 'import numpy as np\n'), ((12662, 12696), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': 'np.float32'}), '(0, 16, dtype=np.float32)\n', (12671, 12696), True, 'import numpy as np\n'), ((13626, 13660), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': 'np.float32'}), '(0, 16, dtype=np.float32)\n', (13635, 13660), True, 'import numpy as np\n'), ((14729, 14754), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (14735, 14754), True, 'import numpy as np\n'), ((14755, 14786), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (14764, 14786), True, 'import numpy as np\n'), ((15081, 15107), 'numpy.eye', 'np.eye', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (15087, 15107), True, 'import numpy as np\n'), ((15944, 15966), 'numpy.ones', 'np.ones', (['(3, 3, 15, 5)'], {}), '((3, 3, 15, 5))\n', (15951, 15966), True, 'import numpy as np\n'), ((16339, 16362), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'dt'}), '(16, dtype=dt)\n', (16348, 16362), True, 'import numpy as np\n'), ((16500, 16579), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (16508, 16579), True, 'import numpy as np\n'), ((16922, 16945), 
'numpy.arange', 'np.arange', (['(32)'], {'dtype': 'dt'}), '(32, dtype=dt)\n', (16931, 16945), True, 'import numpy as np\n'), ((17083, 17162), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (17091, 17162), True, 'import numpy as np\n'), ((17672, 17703), 'numpy.arange', 'np.arange', (['(27)'], {'dtype': 'np.float32'}), '(27, dtype=np.float32)\n', (17681, 17703), True, 'import numpy as np\n'), ((18289, 18320), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (18298, 18320), True, 'import numpy as np\n'), ((18402, 18487), 'numpy.array', 'np.array', (['[[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]], dtype=np.float32\n )\n', (18410, 18487), True, 'import numpy as np\n'), ((18868, 18878), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (18874, 18878), True, 'import numpy as np\n'), ((20012, 20041), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (20029, 20041), True, 'import numpy as np\n'), ((20407, 20436), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'shape'}), '(size=shape)\n', (20424, 20436), True, 'import numpy as np\n'), ((23652, 23661), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (23658, 23661), True, 'import megengine.functional as F\n'), ((30565, 30586), 'numpy.random.randn', 'np.random.randn', (['*shp'], {}), '(*shp)\n', (30580, 30586), True, 'import numpy as np\n'), ((38968, 38992), 'numpy.arange', 'np.arange', (['(3 * 4 * 5 * 5)'], {}), '(3 * 4 * 5 * 5)\n', (38977, 38992), True, 'import numpy as np\n'), ((40098, 40129), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (40107, 40129), True, 'import numpy as np\n'), ((40189, 40210), 'numpy.random.randn', 'np.random.randn', 
(['(3)', '(3)'], {}), '(3, 3)\n', (40204, 40210), True, 'import numpy as np\n'), ((2466, 2498), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (2473, 2498), True, 'import numpy as np\n'), ((2999, 3031), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (3006, 3031), True, 'import numpy as np\n'), ((4212, 4229), 'numpy.eye', 'np.eye', (['shape2[1]'], {}), '(shape2[1])\n', (4218, 4229), True, 'import numpy as np\n'), ((6711, 6744), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {'dtype': 'np.float32'}), '(1, 3, dtype=np.float32)\n', (6720, 6744), True, 'import numpy as np\n'), ((7206, 7239), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {'dtype': 'np.float32'}), '(1, 9, dtype=np.float32)\n', (7215, 7239), True, 'import numpy as np\n'), ((7491, 7524), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'dtype': 'np.float32'}), '(1, 5, dtype=np.float32)\n', (7500, 7524), True, 'import numpy as np\n'), ((7821, 7854), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'dtype': 'np.float32'}), '(1, 5, dtype=np.float32)\n', (7830, 7854), True, 'import numpy as np\n'), ((8060, 8093), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {'dtype': 'np.float32'}), '(1, 3, dtype=np.float32)\n', (8069, 8093), True, 'import numpy as np\n'), ((16021, 16033), 'megengine.tensor', 'tensor', (['np_x'], {}), '(np_x)\n', (16027, 16033), False, 'from megengine import Parameter, Tensor, is_cuda_available, tensor\n'), ((20081, 20116), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': 'shape'}), '(0, 1, size=shape)\n', (20098, 20116), True, 'import numpy as np\n'), ((20157, 20193), 'numpy.clip', 'np.clip', (['(0)', 'np.inf', '(1 - data * label)'], {}), '(0, np.inf, 1 - data * label)\n', (20164, 20193), True, 'import numpy as np\n'), ((20476, 20511), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': 'shape'}), '(0, 1, size=shape)\n', (20493, 20511), True, 'import numpy as 
np\n'), ((20554, 20590), 'numpy.clip', 'np.clip', (['(0)', 'np.inf', '(1 - data * label)'], {}), '(0, np.inf, 1 - data * label)\n', (20561, 20590), True, 'import numpy as np\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import tabulate
import megengine as mge
import megengine._internal as mgb
import megengine.module as m
import megengine.module.qat as qatm
import megengine.module.quantized as qm
try:
    # Lift the formatter's line cap so long stats tables are logged in
    # full; guard against MegEngine builds where the attribute is absent.
    mge.logger.MegEngineLogFormatter.max_lines = float("inf")
except AttributeError as e:
    # Chain the original AttributeError so the real cause stays visible
    # in the traceback (PEP 3134) instead of only the implicit context.
    raise ValueError("set logger max lines failed") from e

logger = mge.get_logger(__name__)
CALC_FLOPS = {}
def _register_modules(*modules):
def callback(impl):
for module in modules:
CALC_FLOPS[module] = impl
return impl
return callback
@_register_modules(
    m.Conv2d,
    m.ConvTranspose2d,
    m.LocalConv2d,
    qm.Conv2d,
    qm.ConvRelu2d,
    qm.ConvBn2d,
    qm.ConvBnRelu2d,
    qatm.Conv2d,
    qatm.ConvRelu2d,
    qatm.ConvBn2d,
    qatm.ConvBnRelu2d,
)
def count_convNd(module, input, output):
    """FLOPs of an N-d convolution.

    Formula: N * H*W * (Cout/groups) * ((Cin/groups) * prod(kernel) + bias)
    — one multiply-accumulate per kernel tap per output element, plus one
    op per output element when a bias is present.

    NOTE(review): ``m.ConvTranspose2d`` listed here is re-registered by
    ``count_deconvNd`` below, which overwrites this entry.
    """
    has_bias = 0 if module.bias is None else 1
    groups = module.groups
    in_ch = input[0].shape[1]
    out_ch = output[0].shape[1]
    batch = output[0].shape[0]
    out_spatial = np.prod(output[0].shape[2:])
    # Cost of producing one output element within its group.
    per_elem = (in_ch // groups) * np.prod(module.kernel_size) + has_bias
    return batch * out_spatial * (out_ch // groups) * per_elem
@_register_modules(m.ConvTranspose2d)
def count_deconvNd(module, input, output):
    """FLOPs of a transposed convolution: every input element is multiplied
    by each kernel tap for every output channel."""
    kernel_ops = np.prod(module.kernel_size)
    return np.prod(input[0].shape) * output[0].shape[1] * kernel_ops
@_register_modules(m.Linear, qatm.Linear, qm.Linear)
def count_linear(module, input, output):
    """FLOPs of a fully-connected layer: one multiply-accumulate per input
    feature for every output element."""
    out_elems = np.prod(output[0].shape)
    return out_elems * module.in_features
# Module classes that receive forward hooks for statistics collection.
# Only float modules are listed: the QAT and quantized variants inherit
# from these float base classes, so isinstance checks match them as well.
hook_modules = (
    m.Conv2d,
    m.ConvTranspose2d,
    m.LocalConv2d,
    m.BatchNorm2d,
    m.Linear,
)
def net_stats(model, input_size, bar_length_max=20, log_params=True, log_flops=True):
    """Log per-layer parameter and FLOPs statistics for one dummy forward pass.

    Runs ``model`` (in eval mode) once on zero-filled inputs of shape
    ``input_size`` — a single shape tuple, or a list of shape tuples for
    multi-input networks — collecting stats via forward hooks installed on
    every ``hook_modules`` instance.

    :param model: module to inspect; its interface is untouched (hooks are removed).
    :param input_size: input shape, or list of shapes for multiple inputs.
    :param bar_length_max: max width of the ASCII ratio bar in the tables.
    :param log_params: whether to compute and log the parameter table.
    :param log_flops: whether to compute and log the FLOPs table.
    :return: ``(total_params, total_flops)``; each is 0 if its logging was disabled.
    """
    def dict2table(list_of_dict, header):
        # Convert a list of row-dicts into tabulate's list-of-lists format,
        # filling columns missing from a row with "".
        table_data = [header]
        for d in list_of_dict:
            row = []
            for h in header:
                v = ""
                if h in d:
                    v = d[h]
                row.append(v)
            table_data.append(row)
        return table_data
    def sizeof_fmt(num, suffix="B"):
        # Human-readable binary-prefixed formatting (KiB/MiB/... or KiOPs/...).
        for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
            if abs(num) < 1024.0:
                return "{:3.3f} {}{}".format(num, unit, suffix)
            num /= 1024.0
        sign_str = "-" if num < 0 else ""
        return "{}{:.1f} {}{}".format(sign_str, num, "Yi", suffix)
    def get_byteswidth(tensor):
        # Bytes per element: 1 for quantized, 2 for bfloat16, else assume 4.
        dtype = tensor.dtype
        if mgb.dtype.is_quantize(dtype):
            return 1
        elif mgb.dtype.is_bfloat16(dtype):
            return 2
        else:
            return 4
    def print_flops_stats(flops):
        # Annotate each per-layer record with cumulative/relative FLOPs
        # in-place, append a "total" row, and log the table.
        flops_list = [i["flops_num"] for i in flops]
        max_flops_num = max(flops_list + [0])
        # calc total flops and set flops_cum
        total_flops_num = 0
        for d in flops:
            total_flops_num += int(d["flops_num"])
            d["flops_cum"] = sizeof_fmt(total_flops_num, suffix="OPs")
        for i in flops:
            f = i["flops_num"]
            i["flops"] = sizeof_fmt(f, suffix="OPs")
            r = i["ratio"] = f / total_flops_num
            i["percentage"] = "{:.2f}%".format(r * 100)
            bar_length = int(f / max_flops_num * bar_length_max)
            i["bar"] = "#" * bar_length
        header = [
            "name",
            "class_name",
            "input_shapes",
            "output_shapes",
            "flops",
            "flops_cum",
            "percentage",
            "bar",
        ]
        total_flops_str = sizeof_fmt(total_flops_num, suffix="OPs")
        # NOTE(review): sums dim 1 of each output shape — presumably a
        # channel/feature count summary, not a full element count; confirm.
        total_var_size = sum(sum(s[1] for s in i["output_shapes"]) for i in flops)
        flops.append(
            dict(name="total", flops=total_flops_str, output_shapes=total_var_size)
        )
        logger.info(
            "flops stats: \n" + tabulate.tabulate(dict2table(flops, header=header))
        )
        return total_flops_num
    def print_params_stats(params):
        # Annotate each parameter record with cumulative/relative sizes
        # in-place, append a "total" row, and log the table.
        total_param_dims, total_param_size = 0, 0
        for d in params:
            total_param_dims += int(d["param_dim"])
            total_param_size += int(d["size"])
            d["size"] = sizeof_fmt(d["size"])
            d["size_cum"] = sizeof_fmt(total_param_size)
        for d in params:
            ratio = d["param_dim"] / total_param_dims
            d["ratio"] = ratio
            d["percentage"] = "{:.2f}%".format(ratio * 100)
        # construct bar
        max_ratio = max([d["ratio"] for d in params])
        for d in params:
            bar_length = int(d["ratio"] / max_ratio * bar_length_max)
            d["size_bar"] = "#" * bar_length
        param_size = sizeof_fmt(total_param_size)
        params.append(dict(name="total", param_dim=total_param_dims, size=param_size,))
        header = [
            "name",
            "shape",
            "mean",
            "std",
            "param_dim",
            "bits",
            "size",
            "size_cum",
            "percentage",
            "size_bar",
        ]
        logger.info(
            "param stats: \n" + tabulate.tabulate(dict2table(params, header=header))
        )
        return total_param_size
    def net_stats_hook(module, input, output, name=""):
        # Forward hook: record FLOPs (if a counter is registered for this
        # module type) and weight/bias parameter stats into the enclosing
        # `flops` / `params` lists.
        class_name = str(module.__class__).split(".")[-1].split("'")[0]
        flops_fun = CALC_FLOPS.get(type(module))
        if callable(flops_fun):
            flops_num = flops_fun(module, input, output)
            if not isinstance(output, (list, tuple)):
                output = [output]
            flops.append(
                dict(
                    name=name,
                    class_name=class_name,
                    input_shapes=[i.shape for i in input],
                    output_shapes=[o.shape for o in output],
                    flops_num=flops_num,
                    flops_cum=0,
                )
            )
        if hasattr(module, "weight") and module.weight is not None:
            w = module.weight
            value = w.numpy()
            param_dim = np.prod(w.shape)
            param_bytes = get_byteswidth(w)
            params.append(
                dict(
                    name=name + "-w",
                    shape=w.shape,
                    param_dim=param_dim,
                    bits=param_bytes * 8,
                    size=param_dim * param_bytes,
                    size_cum=0,
                    mean="{:.2g}".format(value.mean()),
                    std="{:.2g}".format(value.std()),
                )
            )
        if hasattr(module, "bias") and module.bias is not None:
            b = module.bias
            value = b.numpy()
            param_dim = np.prod(b.shape)
            param_bytes = get_byteswidth(b)
            params.append(
                dict(
                    name=name + "-b",
                    shape=b.shape,
                    param_dim=param_dim,
                    bits=param_bytes * 8,
                    size=param_dim * param_bytes,
                    size_cum=0,
                    mean="{:.2g}".format(value.mean()),
                    std="{:.2g}".format(value.std()),
                )
            )
    # multiple inputs to the network
    if not isinstance(input_size[0], tuple):
        input_size = [input_size]
    params = []
    flops = []
    hooks = []
    for (name, module) in model.named_modules():
        if isinstance(module, hook_modules):
            hooks.append(
                module.register_forward_hook(partial(net_stats_hook, name=name))
            )
    # dummy zero-filled forward pass to trigger the hooks
    inputs = [mge.zeros(in_size, dtype=np.float32) for in_size in input_size]
    model.eval()
    model(*inputs)
    for h in hooks:
        h.remove()
    total_flops, total_params = 0, 0
    if log_params:
        total_params = print_params_stats(params)
    if log_flops:
        total_flops = print_flops_stats(flops)
    return total_params, total_flops
|
[
"megengine._internal.dtype.is_quantize",
"megengine._internal.dtype.is_bfloat16",
"megengine.zeros",
"megengine.get_logger"
] |
[((741, 765), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (755, 765), True, 'import megengine as mge\n'), ((1434, 1462), 'numpy.prod', 'np.prod', (['output[0].shape[2:]'], {}), '(output[0].shape[2:])\n', (1441, 1462), True, 'import numpy as np\n'), ((1722, 1749), 'numpy.prod', 'np.prod', (['module.kernel_size'], {}), '(module.kernel_size)\n', (1729, 1749), True, 'import numpy as np\n'), ((1857, 1881), 'numpy.prod', 'np.prod', (['output[0].shape'], {}), '(output[0].shape)\n', (1864, 1881), True, 'import numpy as np\n'), ((2922, 2950), 'megengine._internal.dtype.is_quantize', 'mgb.dtype.is_quantize', (['dtype'], {}), '(dtype)\n', (2943, 2950), True, 'import megengine._internal as mgb\n'), ((8007, 8043), 'megengine.zeros', 'mge.zeros', (['in_size'], {'dtype': 'np.float32'}), '(in_size, dtype=np.float32)\n', (8016, 8043), True, 'import megengine as mge\n'), ((1675, 1698), 'numpy.prod', 'np.prod', (['input[0].shape'], {}), '(input[0].shape)\n', (1682, 1698), True, 'import numpy as np\n'), ((2986, 3014), 'megengine._internal.dtype.is_bfloat16', 'mgb.dtype.is_bfloat16', (['dtype'], {}), '(dtype)\n', (3007, 3014), True, 'import megengine._internal as mgb\n'), ((6485, 6501), 'numpy.prod', 'np.prod', (['w.shape'], {}), '(w.shape)\n', (6492, 6501), True, 'import numpy as np\n'), ((7122, 7138), 'numpy.prod', 'np.prod', (['b.shape'], {}), '(b.shape)\n', (7129, 7138), True, 'import numpy as np\n'), ((1545, 1572), 'numpy.prod', 'np.prod', (['module.kernel_size'], {}), '(module.kernel_size)\n', (1552, 1572), True, 'import numpy as np\n'), ((7942, 7976), 'functools.partial', 'partial', (['net_stats_hook'], {'name': 'name'}), '(net_stats_hook, name=name)\n', (7949, 7976), False, 'from functools import partial\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.core._imperative_rt.imperative import sync
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
    """Gradients computed via explicit record/release and via the context
    manager form of GradManager must agree."""
    x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
    w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
    b = mge.tensor(-1.0)
    gm = GradManager().attach([w, b])

    def check_grads():
        # d(x @ w + b)/dw = x^T, d/db = 1
        np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
        np.testing.assert_equal(b.grad.numpy(), [1])

    # explicit record / release
    gm.record()
    y = F.matmul(x, w) + b
    gm.backward(y)
    gm.release()  # is not necessary
    check_grads()

    # same computation with the context-manager form
    w.grad = None
    b.grad = None
    with gm:
        y = F.matmul(x, w) + b
        gm.backward(y)
    check_grads()
def test_attach_in_with_block():
    """A tensor attached *while* recording must still receive its gradient."""
    param = mge.Parameter([1.0])
    manager = GradManager()
    with manager:
        mid = param * 3
        manager.attach(mid)
        out = mid + 1
        manager.backward(out)
    # d(out)/d(mid) == 1
    assert int(mid.grad.numpy()) == 1
def test_attach_temporary():
    """Per-iteration attached tensors run their callback and are freed after
    the recording block exits."""
    w = mge.Parameter(2.0)
    gm = GradManager()
    gm.attach(w)

    def hook(grabbed, _grad):
        # must fire with exactly the tensor attached this iteration
        assert grabbed is live_ref()
        hook.called = True

    for step in range(3):
        with gm:
            hook.called = False
            t = mge.Tensor(step, dtype="float32")
            gm.attach(t, callbacks=hook)
            live_ref = weakref.ref(t)
            prod = t * w
            gm.backward(prod)
            assert hook.called
        del t
        # once recording ends, nothing should keep the tensor alive
        assert live_ref() is None

    # NOTE: timely release is NOT guaranteed while still recording, so the
    # variant that deletes the tensor before backward() is intentionally
    # omitted:
    # for i in range(3):
    #     with gm:
    #         x = mge.Tensor(i, dtype='float32')
    #         gm.attach(x)
    #         ref = weakref.ref(x)
    #         y = x * w
    #         del x
    #         assert ref() is None
    #     gm.backward(y)
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
    """Pipeline-style gradient flow across ranks via remote_send/remote_recv.

    Each rank owns one Linear layer; activations are forwarded to the next
    rank and gradients flow back through the same channel under GradManager.
    """
    @dist.launcher
    def worker():
        rank = dist.get_rank()
        size = dist.get_world_size()
        x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
        # rank r maps width r*2+2 -> r*2+4, which equals rank (r+1)'s
        # expected input width (r+1)*2+2
        m = M.Linear(rank * 2 + 2, rank * 2 + 4)
        gm = GradManager().attach(m.parameters())
        opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
        @trace(symbolic=True)
        def train_func(x):
            with gm:
                if rank != 0:
                    # non-head ranks ignore the local x and receive the
                    # previous rank's activation instead
                    x = dist.functional.remote_recv(
                        rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
                    )
                y = m(x)
                if rank != size - 1:
                    y = dist.functional.remote_send(y, dest_rank=rank + 1)
                if rank == size - 1:
                    # only the tail rank owns the loss; the others call
                    # backward() with no argument to pull remote gradients
                    y = y.mean()
                    gm.backward(y)
                else:
                    gm.backward()
            opt.step().clear_grad()
        for i in range(3):
            train_func(x)
        # NOTE(review): reading .numpy() appears to act as a sync point so
        # any failure in the traced steps surfaces here — confirm.
        for param in m.parameters():
            param.numpy()
    worker()
|
[
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.jit.trace",
"megengine.Tensor",
"megengine.tensor",
"megengine.module.Linear",
"megengine.distributed.get_rank",
"megengine.autodiff.GradManager",
"megengine.Parameter",
"megengine.functional.matmul",
"megengine.distributed.functional.remote_send",
"megengine.distributed.functional.remote_recv",
"megengine.distributed.get_world_size"
] |
[((905, 921), 'megengine.tensor', 'mge.tensor', (['(-1.0)'], {}), '(-1.0)\n', (915, 921), True, 'import megengine as mge\n'), ((986, 1000), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', (994, 1000), True, 'import megengine.functional as F\n'), ((1454, 1474), 'megengine.Parameter', 'mge.Parameter', (['[1.0]'], {}), '([1.0])\n', (1467, 1474), True, 'import megengine as mge\n'), ((1484, 1497), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1495, 1497), False, 'from megengine.autodiff import GradManager\n'), ((1666, 1684), 'megengine.Parameter', 'mge.Parameter', (['(2.0)'], {}), '(2.0)\n', (1679, 1684), True, 'import megengine as mge\n'), ((1694, 1707), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1705, 1707), False, 'from megengine.autodiff import GradManager\n'), ((1244, 1258), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', (1252, 1258), True, 'import megengine.functional as F\n'), ((2833, 2848), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2846, 2848), True, 'import megengine.distributed as dist\n'), ((2864, 2885), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2883, 2885), True, 'import megengine.distributed as dist\n'), ((2973, 3009), 'megengine.module.Linear', 'M.Linear', (['(rank * 2 + 2)', '(rank * 2 + 4)'], {}), '(rank * 2 + 2, rank * 2 + 4)\n', (2981, 3009), True, 'import megengine.module as M\n'), ((3130, 3150), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3135, 3150), False, 'from megengine.jit import trace\n'), ((2458, 2475), 'platform.system', 'platform.system', ([], {}), '()\n', (2473, 2475), False, 'import platform\n'), ((2558, 2575), 'platform.system', 'platform.system', ([], {}), '()\n', (2573, 2575), False, 'import platform\n'), ((2655, 2686), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), 
"('gpu')\n", (2679, 2686), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((805, 832), 'megengine.tensor', 'mge.tensor', (['[1.0, 3.0, 5.0]'], {}), '([1.0, 3.0, 5.0])\n', (815, 832), True, 'import megengine as mge\n'), ((855, 882), 'megengine.tensor', 'mge.tensor', (['[2.0, 4.0, 6.0]'], {}), '([2.0, 4.0, 6.0])\n', (865, 882), True, 'import megengine as mge\n'), ((932, 945), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (943, 945), False, 'from megengine.autodiff import GradManager\n'), ((1882, 1912), 'megengine.Tensor', 'mge.Tensor', (['i'], {'dtype': '"""float32"""'}), "(i, dtype='float32')\n", (1892, 1912), True, 'import megengine as mge\n'), ((1970, 1984), 'weakref.ref', 'weakref.ref', (['x'], {}), '(x)\n', (1981, 1984), False, 'import weakref\n'), ((2909, 2941), 'numpy.random.randn', 'np.random.randn', (['(1)', '(rank * 2 + 2)'], {}), '(1, rank * 2 + 2)\n', (2924, 2941), True, 'import numpy as np\n'), ((3023, 3036), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (3034, 3036), False, 'from megengine.autodiff import GradManager\n'), ((3253, 3338), 'megengine.distributed.functional.remote_recv', 'dist.functional.remote_recv', (['(rank - 1)'], {'shape': '(1, rank * 2 + 2)', 'dtype': 'np.float32'}), '(rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32\n )\n', (3280, 3338), True, 'import megengine.distributed as dist\n'), ((3466, 3516), 'megengine.distributed.functional.remote_send', 'dist.functional.remote_send', (['y'], {'dest_rank': '(rank + 1)'}), '(y, dest_rank=rank + 1)\n', (3493, 3516), True, 'import megengine.distributed as dist\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import pytest
from basecls.models.regnet import RegBottleneckBlock
from basecls.models.resnet import (
AnyStage,
ResBasicBlock,
ResBottleneckBlock,
ResDeepStem,
ResStem,
SimpleStem,
)
@pytest.mark.parametrize("Block", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock])
@pytest.mark.parametrize("w_in", [32])
@pytest.mark.parametrize("w_out", [32, 64])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("bot_mul", [1.0, 0.25])
@pytest.mark.parametrize("group_w", [8])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("avg_down", [True, False])
@pytest.mark.parametrize("drop_path_prob", [0.05, 0.1])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(
    Block,
    w_in,
    w_out,
    stride,
    bot_mul,
    group_w,
    se_r,
    avg_down,
    drop_path_prob,
    norm_name,
    act_name,
):
    """Smoke-test every block variant: construct it and run a dummy batch."""
    options = dict(
        bot_mul=bot_mul,
        group_w=group_w,
        se_r=se_r,
        avg_down=avg_down,
        drop_path_prob=drop_path_prob,
        norm_name=norm_name,
        act_name=act_name,
    )
    block = Block(w_in, w_out, stride, **options)
    assert isinstance(block, M.Module)
    # NCHW batch with w_in (=32) channels
    block(mge.random.normal(size=(2, 32, 8, 8)))
@pytest.mark.parametrize("Stem", [ResDeepStem, ResStem, SimpleStem])
@pytest.mark.parametrize("w_in", [3])
@pytest.mark.parametrize("w_out", [8, 16])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_stem(Stem, w_in, w_out, norm_name, act_name):
    """Smoke-test every stem variant: construct it and run a dummy batch."""
    stem = Stem(w_in, w_out, norm_name=norm_name, act_name=act_name)
    assert isinstance(stem, M.Module)
    # NCHW batch with w_in (=3) channels
    stem(mge.random.normal(size=(2, 3, 8, 8)))
@pytest.mark.parametrize("w_in", [4])
@pytest.mark.parametrize("w_out", [4, 8])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("depth", [2])
@pytest.mark.parametrize("block_func", [RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock])
@pytest.mark.parametrize("drop_path_prob", [[0.05, 0.1]])
def test_any_stage(w_in, w_out, stride, depth, block_func, drop_path_prob):
    """AnyStage must build `depth` blocks and survive a forward pass."""
    stage = AnyStage(
        w_in,
        w_out,
        stride,
        depth,
        block_func,
        drop_path_prob,
        bot_mul=1.0,
        group_w=4,
        se_r=0.0,
        avg_down=False,
        norm_name="BN",
        act_name="relu",
    )
    assert isinstance(stage, M.Module)
    # one block per requested depth
    assert len(stage) == depth
    stage(mge.random.normal(size=(2, 4, 8, 8)))
|
[
"megengine.random.normal"
] |
[((347, 440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Block"""', '[RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]'], {}), "('Block', [RegBottleneckBlock, ResBasicBlock,\n ResBottleneckBlock])\n", (370, 440), False, 'import pytest\n'), ((438, 475), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32]'], {}), "('w_in', [32])\n", (461, 475), False, 'import pytest\n'), ((477, 519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[32, 64]'], {}), "('w_out', [32, 64])\n", (500, 519), False, 'import pytest\n'), ((521, 562), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (544, 562), False, 'import pytest\n'), ((564, 611), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bot_mul"""', '[1.0, 0.25]'], {}), "('bot_mul', [1.0, 0.25])\n", (587, 611), False, 'import pytest\n'), ((613, 652), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""group_w"""', '[8]'], {}), "('group_w', [8])\n", (636, 652), False, 'import pytest\n'), ((654, 698), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (677, 698), False, 'import pytest\n'), ((700, 750), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""avg_down"""', '[True, False]'], {}), "('avg_down', [True, False])\n", (723, 750), False, 'import pytest\n'), ((752, 806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[0.05, 0.1]'], {}), "('drop_path_prob', [0.05, 0.1])\n", (775, 806), False, 'import pytest\n'), ((808, 852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (831, 852), False, 'import pytest\n'), ((854, 899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (877, 899), False, 'import pytest\n'), ((1403, 1470), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Stem"""', '[ResDeepStem, ResStem, SimpleStem]'], {}), "('Stem', [ResDeepStem, ResStem, SimpleStem])\n", (1426, 1470), False, 'import pytest\n'), ((1472, 1508), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[3]'], {}), "('w_in', [3])\n", (1495, 1508), False, 'import pytest\n'), ((1510, 1551), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[8, 16]'], {}), "('w_out', [8, 16])\n", (1533, 1551), False, 'import pytest\n'), ((1553, 1597), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (1576, 1597), False, 'import pytest\n'), ((1599, 1644), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (1622, 1644), False, 'import pytest\n'), ((1849, 1885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[4]'], {}), "('w_in', [4])\n", (1872, 1885), False, 'import pytest\n'), ((1887, 1927), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[4, 8]'], {}), "('w_out', [4, 8])\n", (1910, 1927), False, 'import pytest\n'), ((1929, 1970), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (1952, 1970), False, 'import pytest\n'), ((1972, 2009), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth"""', '[2]'], {}), "('depth', [2])\n", (1995, 2009), False, 'import pytest\n'), ((2011, 2109), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""block_func"""', '[RegBottleneckBlock, ResBasicBlock, ResBottleneckBlock]'], {}), "('block_func', [RegBottleneckBlock, ResBasicBlock,\n ResBottleneckBlock])\n", (2034, 2109), False, 'import pytest\n'), ((2107, 2163), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[[0.05, 0.1]]'], {}), "('drop_path_prob', [[0.05, 0.1]])\n", (2130, 2163), False, 'import 
pytest\n'), ((2248, 2400), 'basecls.models.resnet.AnyStage', 'AnyStage', (['w_in', 'w_out', 'stride', 'depth', 'block_func', 'drop_path_prob'], {'bot_mul': '(1.0)', 'group_w': '(4)', 'se_r': '(0.0)', 'avg_down': '(False)', 'norm_name': '"""BN"""', 'act_name': '"""relu"""'}), "(w_in, w_out, stride, depth, block_func, drop_path_prob, bot_mul=\n 1.0, group_w=4, se_r=0.0, avg_down=False, norm_name='BN', act_name='relu')\n", (2256, 2400), False, 'from basecls.models.resnet import AnyStage, ResBasicBlock, ResBottleneckBlock, ResDeepStem, ResStem, SimpleStem\n'), ((1361, 1398), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 32, 8, 8)'}), '(size=(2, 32, 8, 8))\n', (1378, 1398), True, 'import megengine as mge\n'), ((1808, 1844), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 3, 8, 8)'}), '(size=(2, 3, 8, 8))\n', (1825, 1844), True, 'import megengine as mge\n'), ((2568, 2604), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, 4, 8, 8)'}), '(size=(2, 4, 8, 8))\n', (2585, 2604), True, 'import megengine as mge\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optimizer
from megengine import Parameter
from megengine import Tensor as tensor
from megengine import tensor
from megengine.core.tensor.function import Function
from megengine.module import Module
def test_single_input():
    """Custom Function with one input: forward scales by 10 and backward
    hands back a hand-written gradient of 10."""
    shape = (9, 2, 6)
    init = np.random.random(shape).astype(np.float32)

    class TimesTen(Function):
        def forward(self, a):
            self.a = a
            return a * 10

        def backward(self, grad_o):
            # d(10 * a)/da = 10
            return grad_o * 10

    class Net(Module):
        def __init__(self, value):
            super().__init__()
            self.a = Parameter(value, dtype=np.float32)
            self.layer1 = TimesTen()

        def forward(self):
            return self.layer1(self.a)

    net = Net(init)
    gm = ad.GradManager().attach(net.parameters())
    opt = optimizer.SGD(net.parameters(), lr=1.0)
    opt.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    opt.step()
    # SGD with lr=1 subtracts the constant grad (10) from every element
    np.testing.assert_almost_equal(loss.numpy(), init * 10)
    np.testing.assert_almost_equal(net.a.numpy(), init - 10)
def test_multi_input():
    """Custom Function with two inputs: backward returns one gradient per
    input, here deliberately scaled so the test can detect each path."""
    shape = (9, 2, 6)
    av = np.random.random(shape).astype(np.float32)
    bv = np.random.random(shape).astype(np.float32)

    class ScaledMul(Function):
        def forward(self, a, b):
            self.a = a
            self.b = b
            return a * b

        def backward(self, grad_o):
            # fake grads: 2*b w.r.t. a, 3*a w.r.t. b
            return grad_o * self.b * 2, grad_o * self.a * 3

    class Net(Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.b = Parameter(b, dtype=np.float32)
            self.layer1 = ScaledMul()

        def forward(self):
            return self.layer1(self.a, self.b)

    net = Net(av, bv)
    gm = ad.GradManager().attach(net.parameters())
    opt = optimizer.SGD(net.parameters(), lr=1.0)
    opt.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    opt.step()
    np.testing.assert_almost_equal(loss.numpy(), av * bv)
    np.testing.assert_almost_equal(net.a.numpy(), av - 2 * bv)
    np.testing.assert_almost_equal(net.b.numpy(), bv - 3 * av)
def test_multi_output():
    """Custom Function with two outputs: backward receives one incoming
    gradient per forward output."""
    shape = (9, 2, 6)
    av = np.random.random(shape).astype(np.float32)
    bv = np.random.random(shape).astype(np.float32)

    class MulAndAdd(Function):
        def forward(self, a, b):
            self.a = a
            self.b = b
            return a * b, a + b

        def backward(self, grad_1, grad_2):
            # deliberately asymmetric fake grads, one per output
            return grad_1 * (self.b + 1), grad_2 * (self.a + 1)

    class Net(Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.b = Parameter(b, dtype=np.float32)
            self.layer1 = MulAndAdd()

        def forward(self):
            x, y = self.layer1(self.a, self.b)
            return x + y

    net = Net(av, bv)
    gm = ad.GradManager().attach(net.parameters())
    opt = optimizer.SGD(net.parameters(), lr=1.0)
    opt.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    opt.step()
    np.testing.assert_almost_equal(loss.numpy(), av * bv + av + bv, decimal=6)
    np.testing.assert_almost_equal(net.a.numpy(), av - bv - 1, decimal=6)
    np.testing.assert_almost_equal(net.b.numpy(), bv - av - 1, decimal=6)
def test_skip_invalid_grad():
    """backward() must accept a Function whose grads come from a captured
    tensor rather than the real incoming gradient."""
    shape = (1, 9, 2, 6)
    av = np.random.random(shape).astype(np.float32)
    bv = np.random.random(shape).astype(np.float32)
    fixed = np.random.random(shape).astype(np.float32)
    cookie = tensor(fixed)

    class AddWithFakeGrad(Function):
        def forward(self, a, b):
            return a + b

        def backward(self, grad_o):
            # discard the true gradient and hand back the cookie twice
            _ = grad_o
            return cookie, cookie

    class Net(Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.b = Parameter(b, dtype=np.float32)
            self.layer1 = AddWithFakeGrad()

        def forward(self):
            return self.layer1(self.a, self.b)

    net = Net(av, bv)
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    gm = ad.GradManager().attach(net.parameters())
    optim.clear_grad()
    with gm:
        loss = net().sum()
        gm.backward(loss)
    optim.step()
    # SGD with lr=1 subtracts exactly the cookie from each parameter
    np.testing.assert_almost_equal(net.a.numpy(), av - fixed)
    np.testing.assert_almost_equal(net.b.numpy(), bv - fixed)
def test_ste():
    """Straight-through estimator: fake-quantize in forward, identity in
    backward, so gradients ignore the rounding."""

    class FakeQuant(Function):
        def forward(self, x):
            # symmetric int8-style quantization step
            maxv, minv = x.max(), x.min()
            scale = F.maximum(maxv, -minv) / 127
            return F.round(x / scale) * scale

        def backward(self, grad_y):
            # straight-through: pass the gradient unchanged
            return grad_y

    class Net(Module):
        def __init__(self, init):
            super().__init__()
            self.a = Parameter(init, dtype=np.float32)
            self.layer1 = FakeQuant()

        def forward(self):
            quantized = self.layer1(self.a)
            return (quantized * 2.0).sum()

    shape = (1, 9, 2, 6)
    start = np.random.random(shape).astype(np.float32)
    net = Net(start)
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    gm = ad.GradManager().attach(net.parameters())
    optim.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    optim.step()
    # d(loss)/da == 2 everywhere thanks to the straight-through backward
    np.testing.assert_almost_equal(
        net.a.numpy(),
        start - np.broadcast_to(np.array([2.0], dtype=np.float32), shape),
    )
def test_deepcopy():
    """copy.deepcopy must preserve custom attributes of a Function subclass."""

    class Sigmoid(Function):
        def __init__(self, param):
            super().__init__()
            self.param = param

        def forward(self, x):
            exp_neg = F.exp(-x)
            y = 1 / (1 + exp_neg)
            self.save_for_backward(y)
            return y

        def backward(self, grad_y):
            # d(sigmoid)/dx = y * (1 - y)
            (y,) = self.saved_tensors
            return grad_y * y * (1 - y)

    original = Sigmoid(0)
    clone = copy.deepcopy(Sigmoid(0))
    assert clone.param == original.param
def test_none_in_out_grad():
    """An output that never reaches the loss gets a None incoming gradient,
    and returning a scalar 0.0 as an outgoing gradient is legal."""

    class PassThrough(Function):
        def forward(self, a, b):
            return a, b

        def backward(self, grad_a, grad_b):
            # the second output is unused downstream -> no incoming grad
            assert grad_b is None
            return (grad_a, 0.0)

    class Net(Module):
        def __init__(self, a, b):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.b = Parameter(b, dtype=np.float32)
            self.layer = PassThrough()

        def forward(self):
            aa, bb = self.layer(self.a, self.b)
            return aa, bb

    first = tensor(np.array([1.0], dtype=np.float32))
    second = tensor(np.array([2.0], dtype=np.float32))
    net = Net(first, second)
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    gm = ad.GradManager().attach(net.parameters())
    optim.clear_grad()
    with gm:
        loss, _ = net()
        gm.backward(loss)
    optim.step()
    # `a` receives grad 1 from the loss path; `b` receives the literal 0.0
    np.testing.assert_almost_equal(
        net.a.numpy(), np.array([1.0 - 1.0], dtype=np.float32)
    )
    np.testing.assert_almost_equal(
        net.b.numpy(), np.array([2.0 - 0.0], dtype=np.float32)
    )
def test_zero_grad():
    """Returning None from backward severs that branch of the graph."""

    class BlockGrad(Function):
        def forward(self, a):
            return a

        def backward(self, *_):
            # None means "no gradient flows through here"
            return None

    class Net(Module):
        def __init__(self, a):
            super().__init__()
            self.a = Parameter(a, dtype=np.float32)
            self.layer = BlockGrad()

        def forward(self):
            blocked = self.a * 3.0
            passed = self.a * 4.0
            return self.layer(blocked) + passed

    net = Net(tensor(np.array([1.0], dtype=np.float32)))
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    gm = ad.GradManager().attach(net.parameters())
    optim.clear_grad()
    with gm:
        loss = net()
        gm.backward(loss.sum())
    optim.step()
    # only the *4.0 branch contributes, so the grad is exactly 4
    np.testing.assert_almost_equal(
        net.a.numpy(), np.array([1.0 - 4.0], dtype=np.float32),
    )
|
[
"megengine.functional.round",
"megengine.functional.maximum",
"megengine.tensor",
"megengine.autodiff.GradManager",
"megengine.Parameter",
"megengine.functional.exp"
] |
[((4180, 4189), 'megengine.tensor', 'tensor', (['c'], {}), '(c)\n', (4186, 4189), False, 'from megengine import tensor\n'), ((7147, 7180), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (7155, 7180), True, 'import numpy as np\n'), ((7197, 7230), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (7205, 7230), True, 'import numpy as np\n'), ((7521, 7560), 'numpy.array', 'np.array', (['[1.0 - 1.0]'], {'dtype': 'np.float32'}), '([1.0 - 1.0], dtype=np.float32)\n', (7529, 7560), True, 'import numpy as np\n'), ((7626, 7665), 'numpy.array', 'np.array', (['[2.0 - 0.0]'], {'dtype': 'np.float32'}), '([2.0 - 0.0], dtype=np.float32)\n', (7634, 7665), True, 'import numpy as np\n'), ((8158, 8191), 'numpy.array', 'np.array', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (8166, 8191), True, 'import numpy as np\n'), ((8482, 8521), 'numpy.array', 'np.array', (['[1.0 - 4.0]'], {'dtype': 'np.float32'}), '([1.0 - 4.0], dtype=np.float32)\n', (8490, 8521), True, 'import numpy as np\n'), ((742, 770), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (758, 770), True, 'import numpy as np\n'), ((1077, 1107), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (1086, 1107), False, 'from megengine import Parameter\n'), ((1260, 1276), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1274, 1276), True, 'import megengine.autodiff as ad\n'), ((1639, 1667), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1655, 1667), True, 'import numpy as np\n'), ((1696, 1724), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1712, 1724), True, 'import numpy as np\n'), ((2088, 2118), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (2097, 2118), False, 'from megengine import 
Parameter\n'), ((2140, 2170), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (2149, 2170), False, 'from megengine import Parameter\n'), ((2335, 2351), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (2349, 2351), True, 'import megengine.autodiff as ad\n'), ((2784, 2812), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2800, 2812), True, 'import numpy as np\n'), ((2841, 2869), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (2857, 2869), True, 'import numpy as np\n'), ((3252, 3282), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (3261, 3282), False, 'from megengine import Parameter\n'), ((3304, 3334), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (3313, 3334), False, 'from megengine import Parameter\n'), ((3506, 3522), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (3520, 3522), True, 'import megengine.autodiff as ad\n'), ((4006, 4034), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4022, 4034), True, 'import numpy as np\n'), ((4063, 4091), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4079, 4091), True, 'import numpy as np\n'), ((4119, 4147), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4135, 4147), True, 'import numpy as np\n'), ((4492, 4522), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (4501, 4522), False, 'from megengine import Parameter\n'), ((4544, 4574), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (4553, 4574), False, 'from megengine import Parameter\n'), ((4798, 4814), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (4812, 4814), True, 'import 
megengine.autodiff as ad\n'), ((5445, 5475), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (5454, 5475), False, 'from megengine import Parameter\n'), ((5665, 5693), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (5681, 5693), True, 'import numpy as np\n'), ((5795, 5811), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (5809, 5811), True, 'import megengine.autodiff as ad\n'), ((6914, 6944), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (6923, 6944), False, 'from megengine import Parameter\n'), ((6966, 6996), 'megengine.Parameter', 'Parameter', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (6975, 6996), False, 'from megengine import Parameter\n'), ((7316, 7332), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (7330, 7332), True, 'import megengine.autodiff as ad\n'), ((7948, 7978), 'megengine.Parameter', 'Parameter', (['a'], {'dtype': 'np.float32'}), '(a, dtype=np.float32)\n', (7957, 7978), False, 'from megengine import Parameter\n'), ((8274, 8290), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (8288, 8290), True, 'import megengine.autodiff as ad\n'), ((5197, 5219), 'megengine.functional.maximum', 'F.maximum', (['maxv', '(-minv)'], {}), '(maxv, -minv)\n', (5206, 5219), True, 'import megengine.functional as F\n'), ((5245, 5263), 'megengine.functional.round', 'F.round', (['(x / scale)'], {}), '(x / scale)\n', (5252, 5263), True, 'import megengine.functional as F\n'), ((6033, 6066), 'numpy.array', 'np.array', (['[2.0]'], {'dtype': 'np.float32'}), '([2.0], dtype=np.float32)\n', (6041, 6066), True, 'import numpy as np\n'), ((6292, 6301), 'megengine.functional.exp', 'F.exp', (['(-x)'], {}), '(-x)\n', (6297, 6301), True, 'import megengine.functional as F\n')]
|
import megengine as mge
import megengine.functional as F
from megengine import tensor
import numpy as np
from megengine.functional.nn import nms
from config import config
from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \
filter_boxes_opr, box_overlap_opr
# from bbox_opr import box_overlap_opr
import pdb
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
        all_anchors_list, im_info):
    """Decode per-level RPN outputs into one set of scored RoIs per batch.

    For every image: decode the predicted offsets against each FPN level's
    anchors, drop boxes smaller than the configured minimum size, keep the
    ``prev_nms_top_n`` highest-scoring survivors, and run NMS over them.

    Args:
        is_train (bool): selects the train/test pre-NMS top-n from config.
        rpn_bbox_offsets_list (list): per-level offset tensors; each entry is
            indexed by image id and reshaped to (-1, 4).
        rpn_cls_prob_list (list): per-level classification tensors; each entry
            is indexed by image id and reshaped to (-1, 2).
        all_anchors_list (list): per-level anchor tensors, one row per anchor.
        im_info: per-image meta; ``im_info[bid, 2]`` is the resize scale used
            by the minimum-size filter.

    Returns:
        (rois, probs): each roi row is ``(bid, x1, y1, x2, y2)``.  With a
        batch of one image the single image's tensors are returned directly;
        otherwise all images are concatenated along axis 0.
    """
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    # currently unused: the post-NMS truncation below is commented out
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds

    list_size = len(rpn_bbox_offsets_list)
    return_rois, return_probs = [], []
    batch_per_gpu = rpn_cls_prob_list[0].shape[0]
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        for l in range(list_size):
            # get proposals and probs of this level
            offsets = rpn_bbox_offsets_list[l][bid] \
                .transpose(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                # BUGFIX: the original multiplied an undefined name
                # (`pred_offsets`) and discarded the result, raising a
                # NameError when normalization is enabled; de-normalize the
                # predicted offsets themselves instead.
                std_opr = tensor(bbox_normalize_stds[None, :])
                mean_opr = tensor(bbox_normalize_means[None, :])
                offsets = offsets * std_opr + mean_opr
            all_anchors = all_anchors_list[l]
            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                .transpose(1, 2, 0).reshape(-1, 2)
            probs = F.softmax(probs)[:, 1]
            # gather the proposals and probs across levels
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)
        batch_proposals = F.concat(batch_proposals_list, axis=0)
        batch_probs = F.concat(batch_probs_list, axis=0)
        # filter the boxes with small size.
        wh = batch_proposals[:, 2:4] - batch_proposals[:, :2] + 1
        thresh = box_min_size * im_info[bid, 2]
        keep_mask = F.prod((wh >= thresh), axis=1)
        # guarantee at least one box survives so cond_take is never empty
        keep_mask = keep_mask + F.equal(keep_mask.sum(), 0)
        keep_mask, inds = F.cond_take(keep_mask > 0, keep_mask)
        inds = inds.astype(np.int32)
        batch_proposals, batch_probs = batch_proposals[inds], batch_probs[inds]
        # keep the prev_nms_top_n highest-scoring proposals before NMS
        num_proposals = F.minimum(prev_nms_top_n, batch_proposals.shape[0])
        idx = F.argsort(batch_probs, descending=True)
        topk_idx = idx[:num_proposals].reshape(-1)
        batch_proposals = batch_proposals[topk_idx].detach()
        batch_probs = batch_probs[topk_idx].detach()
        # For each image, run a total-level NMS, and choose topk results.
        keep_inds = nms(batch_proposals, batch_probs, nms_threshold, max_output=2000)
        # post-NMS truncation left disabled, matching prior behavior:
        # num = F.minimum(post_nms_top_n, keep_inds.shape[0])
        # keep_inds = keep_inds[:num]
        batch_rois, batch_probs = batch_proposals[keep_inds], batch_probs[keep_inds]
        # prepend the batch index so downstream ops can tell images apart
        batch_inds = F.ones((batch_rois.shape[0], 1)) * bid
        batch_rois = F.concat([batch_inds, batch_rois[:, :4]], axis=1)
        return_rois.append(batch_rois)
        return_probs.append(batch_probs)
    if batch_per_gpu == 1:
        return batch_rois, batch_probs
    return F.concat(return_rois, axis=0), F.concat(return_probs, axis=0)
|
[
"megengine.functional.nn.nms",
"megengine.functional.prod",
"megengine.functional.minimum",
"megengine.functional.argsort",
"megengine.tensor",
"megengine.functional.cond_take",
"megengine.functional.concat",
"megengine.functional.ones",
"megengine.functional.softmax"
] |
[((2223, 2261), 'megengine.functional.concat', 'F.concat', (['batch_proposals_list'], {'axis': '(0)'}), '(batch_proposals_list, axis=0)\n', (2231, 2261), True, 'import megengine.functional as F\n'), ((2284, 2318), 'megengine.functional.concat', 'F.concat', (['batch_probs_list'], {'axis': '(0)'}), '(batch_probs_list, axis=0)\n', (2292, 2318), True, 'import megengine.functional as F\n'), ((2497, 2525), 'megengine.functional.prod', 'F.prod', (['(wh >= thresh)'], {'axis': '(1)'}), '(wh >= thresh, axis=1)\n', (2503, 2525), True, 'import megengine.functional as F\n'), ((2614, 2651), 'megengine.functional.cond_take', 'F.cond_take', (['(keep_mask > 0)', 'keep_mask'], {}), '(keep_mask > 0, keep_mask)\n', (2625, 2651), True, 'import megengine.functional as F\n'), ((2964, 3015), 'megengine.functional.minimum', 'F.minimum', (['prev_nms_top_n', 'batch_proposals.shape[0]'], {}), '(prev_nms_top_n, batch_proposals.shape[0])\n', (2973, 3015), True, 'import megengine.functional as F\n'), ((3030, 3069), 'megengine.functional.argsort', 'F.argsort', (['batch_probs'], {'descending': '(True)'}), '(batch_probs, descending=True)\n', (3039, 3069), True, 'import megengine.functional as F\n'), ((3338, 3403), 'megengine.functional.nn.nms', 'nms', (['batch_proposals', 'batch_probs', 'nms_threshold'], {'max_output': '(2000)'}), '(batch_proposals, batch_probs, nms_threshold, max_output=2000)\n', (3341, 3403), False, 'from megengine.functional.nn import nms\n'), ((3698, 3747), 'megengine.functional.concat', 'F.concat', (['[batch_inds, batch_rois[:, :4]]'], {'axis': '(1)'}), '([batch_inds, batch_rois[:, :4]], axis=1)\n', (3706, 3747), True, 'import megengine.functional as F\n'), ((3929, 3958), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (3937, 3958), True, 'import megengine.functional as F\n'), ((3984, 4014), 'megengine.functional.concat', 'F.concat', (['return_probs'], {'axis': '(0)'}), '(return_probs, axis=0)\n', (3992, 4014), True, 
'import megengine.functional as F\n'), ((1744, 1788), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', (['all_anchors', 'offsets'], {}), '(all_anchors, offsets)\n', (1766, 1788), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr, box_overlap_opr\n'), ((3638, 3670), 'megengine.functional.ones', 'F.ones', (['(batch_rois.shape[0], 1)'], {}), '((batch_rois.shape[0], 1))\n', (3644, 3670), True, 'import megengine.functional as F\n'), ((1448, 1491), 'megengine.tensor', 'tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (1454, 1491), False, 'from megengine import tensor\n'), ((1519, 1563), 'megengine.tensor', 'tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (1525, 1563), False, 'from megengine import tensor\n'), ((1861, 1903), 'det_opr.bbox_opr.clip_boxes_opr', 'clip_boxes_opr', (['proposals', 'im_info[bid, :]'], {}), '(proposals, im_info[bid, :])\n', (1875, 1903), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr, box_overlap_opr\n'), ((2025, 2041), 'megengine.functional.softmax', 'F.softmax', (['probs'], {}), '(probs)\n', (2034, 2041), True, 'import megengine.functional as F\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from test.utils import (
ActiveOpr,
AdaptiveAvgPool2dOpr,
BnOpr,
BroadcastOpr,
ConvBn2dOpr,
ConvBnRelu2dOpr,
ConvOpr,
ConvRelu2dOpr,
DropoutOpr,
ElemwiseOpr,
FConcatOpr,
FlattenOpr,
LinearBnOpr,
LinearOpr,
MatrixMulBnOpr,
PoolOpr,
ReduceOpr,
RepeatOpr,
ReshapeOpr,
SqueezeOpr,
SubtensorOpr,
TransposeOpr,
XORNet,
XORNet_LeakyRelu,
)
import caffe # pylint: disable=import-error
import megengine as mge
import megengine.hub
import numpy as np
import pytest
from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe
from .tm_utils import get_traced_module
max_error = 1e-6
tmp_file = "test_module"
def _test_convert_result(
    inputs,
    trace_module,
    mge_results,
    max_err,
    input_data_type=None,
    input_scales=None,
    input_zero_points=None,
    require_quantize=False,
    param_fake_quant=False,
    split_conv_relu=False,
    fuse_bn=False,
    input_name="x",
    convert_backend=1,
):
    """Convert ``trace_module`` to caffe, run it, and compare with megengine.

    The traced module is serialized to the ``tmp_file`` prototxt/caffemodel
    pair, reloaded with pycaffe, fed ``inputs``, and its outputs are checked
    against ``mge_results`` to within ``max_err``.
    """
    tracedmodule_to_caffe(
        trace_module,
        prototxt=tmp_file + ".txt",
        caffemodel=tmp_file + ".caffemodel",
        input_data_type=input_data_type,
        input_scales=input_scales,
        input_zero_points=input_zero_points,
        require_quantize=require_quantize,
        param_fake_quant=param_fake_quant,
        split_conv_relu=split_conv_relu,
        fuse_bn=fuse_bn,
        convert_backend=convert_backend,
    )
    caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST)

    # Feed the input blob(s): a list of names matches blobs by exact
    # (stripped) name and assigns inputs positionally; a single name assigns
    # the whole array to the first blob whose name contains it.
    if isinstance(input_name, list):
        for blob_name in caffe_net.blobs.keys():
            for idx, name in enumerate(input_name):
                if name.strip() == blob_name.strip():
                    caffe_net.blobs[blob_name].data[...] = inputs[idx]
                    break
    else:
        for blob_name in caffe_net.blobs.keys():
            if input_name in blob_name:
                caffe_net.blobs[blob_name].data[...] = inputs
                break

    out_dict = caffe_net.forward()
    if isinstance(mge_results, dict):
        # Multi-output module: match every megengine output by node name.
        assert len(out_dict) == len(mge_results)
        for node in mge_results.keys():
            assert node._name in out_dict.keys()
            assert out_dict[node._name].shape == mge_results[node].shape
            np.testing.assert_allclose(
                out_dict[node._name], mge_results[node], atol=max_err
            )
    else:
        # Single output: compare against the sole caffe result.
        caffe_results = list(out_dict.values())[0]
        assert caffe_results.shape == mge_results.shape
        np.testing.assert_allclose(
            caffe_results, mge_results, rtol=max_err, atol=max_err
        )
@pytest.mark.parametrize("mode", ["normal", "group", "transpose"])
def test_conv2d(mode):
net = ConvOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
def test_convrelu():
    """Fused conv + relu."""
    opr = ConvRelu2dOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_convbn():
    """Fused conv + batchnorm, exported in eval mode."""
    opr = ConvBn2dOpr()
    opr.eval()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_convbnrelu():
    """Fused conv + batchnorm + relu, exported in eval mode."""
    opr = ConvBnRelu2dOpr()
    opr.eval()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_linear():
    """Plain fully-connected layer."""
    opr = LinearOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)
def test_flatten_linear():
    """Linear preceded by a flatten, exported through convert backend 4."""
    opr = LinearOpr("flatten")
    traced, expected = get_traced_module(opr, mge.tensor(opr.data1))
    _test_convert_result(opr.data1, traced, expected, max_error, convert_backend=4)


def test_linear_bn():
    """Linear + batchnorm with the bn folded into the weights (fuse_bn)."""
    opr = LinearBnOpr()
    # Run several forward passes so the bn running statistics get populated
    # before switching to eval mode.
    for _ in range(10):
        opr(mge.tensor(opr.data)).numpy()
    opr.eval()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, 1e-4, fuse_bn=True)
@pytest.mark.parametrize("mode", [True, False])
def test_matmul_bn(mode):
net = MatrixMulBnOpr(mode)
for _ in range(10):
net(mge.tensor(net.data)).numpy()
net.eval()
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, 1e-4, fuse_bn=True)
def test_squeeze():
    """Squeeze of size-1 axes; the caffe input blob is named "a"."""
    opr = SqueezeOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error, input_name="a")
@pytest.mark.parametrize("mode", ["max", "avg"])
def test_pooling(mode):
if megengine.__version__ > "0.6.0" and mode == "avg":
return
net = PoolOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error)
@pytest.mark.parametrize("mode", ["bn1d", "bn2d"])
def test_batchnorm(mode):
net = BnOpr(mode)
net.eval()
data = net.data1 if mode == "bn1d" else net.data2
tm_module, mge_result = get_traced_module(net, mge.tensor(data))
_test_convert_result(data, tm_module, mge_result, max_error)
def test_subtensor():
    """Subtensor (slicing) indexing."""
    opr = SubtensorOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_transpose():
    """Axis permutation."""
    opr = TransposeOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)
def test_concat():
    """Concat of two identical inputs, fed through named blobs."""
    opr = FConcatOpr()
    sample = np.random.random((1, 2, 4, 5)).astype(np.float32)
    inputs = [mge.tensor(sample), mge.tensor(sample)]
    traced, expected = get_traced_module(opr, inputs)
    _test_convert_result(
        [sample, sample], traced, expected, max_error, input_name=["inps_0", "inps_1"]
    )


def test_reshape():
    """Static reshape."""
    opr = ReshapeOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)
@pytest.mark.parametrize(
    "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"]
)
def test_elemwise(mode):
    """Elementwise ops on same-shape operands; input blob is named "a"."""
    opr = ElemwiseOpr(mode)
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error, input_name="a")


@pytest.mark.parametrize(
    "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"]
)
def test_elemwise_broadcast(mode):
    """Elementwise ops where the fed input is a one-element array."""
    opr = ElemwiseOpr(mode)
    traced, expected = get_traced_module(
        opr, mge.tensor(np.array([2.0]).astype("float32"))
    )
    _test_convert_result(
        np.array([2.0]), traced, expected, max_error, input_name="a"
    )
@pytest.mark.parametrize(
    "mode",
    [
        "relu",
        "sigmoid",
        "tanh",
        "leaky_relu",
        "softmax",
        "silu",
        "relu6",
        "hsigmoid",
        "hswish",
    ],
)
def test_active(mode):
    """Activation functions; silu is skipped on megengine builds before 1.5.

    NOTE(review): lexicographic string version comparison — misorders
    e.g. "1.10.0" vs "1.5.0"; confirm whether a real version parse is wanted.
    """
    if megengine.__version__ < "1.5.0" and mode == "silu":
        return
    opr = ActiveOpr(mode)
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)
@pytest.mark.parametrize("mode", ["relu",])
def test_active_inplace(mode):
net = ActiveOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, convert_backend=4)
@pytest.mark.parametrize("mode", ["max", "sum", "mean"])
def test_reduce(mode):
net = ReduceOpr(mode)
tm_module, mge_result = get_traced_module(net, mge.tensor(net.data))
_test_convert_result(net.data, tm_module, mge_result, max_error, input_name="a")
def test_broadcast():
    """Broadcast operator."""
    opr = BroadcastOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_repeat():
    """Repeat operator."""
    opr = RepeatOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error)


def test_flatten():
    """Flatten; the caffe input blob is named "inps"."""
    opr = FlattenOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error, input_name="inps")


def test_dropout():
    """Dropout operator; the caffe input blob is named "inps"."""
    opr = DropoutOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error, input_name="inps")


def test_adapetive_avg_pool():
    """Adaptive average pooling.  (Typo in the name kept: it is the test id.)"""
    opr = AdaptiveAvgPool2dOpr()
    traced, expected = get_traced_module(opr, mge.tensor(opr.data))
    _test_convert_result(opr.data, traced, expected, max_error, input_name="inps")
@pytest.mark.parametrize(
    "model",
    [
        "shufflenet_v2_x0_5",
        "shufflenet_v2_x1_0",
        "resnet18",
        "resnet50",
        "resnet101",
        "resnext50_32x4d",
    ],
)
def test_model(model):
    """End-to-end conversion of pretrained hub classification models."""
    sample = (
        np.random.randint(0, 255, 3 * 224 * 224)
        .reshape((1, 3, 224, 224))
        .astype(np.float32)
    )
    # Older megengine releases need a pinned model-zoo commit.
    commit_id = (
        "dc2f2cfb228a135747d083517b98aea56e7aab92"
        if megengine.__version__ < "1.1.0"
        else None
    )
    net = megengine.hub.load(
        "megengine/models", model, use_cache=False, commit=commit_id, pretrained=True
    )
    net.eval()
    traced, expected = get_traced_module(net, mge.tensor(sample))
    # Pretrained nets accumulate float error; use a looser tolerance.
    _test_convert_result(sample, traced, expected, 1e-2)
def test_xornet():
    """Small XOR network; skipped on megengine builds before 1.1."""
    if megengine.__version__ < "1.1.0":
        return
    net = XORNet()
    net.eval()
    traced, expected = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, traced, expected, max_error)


def test_leakyrelu_model():
    """XOR network with leaky-relu; skipped on megengine builds before 1.1."""
    if megengine.__version__ < "1.1.0":
        return
    net = XORNet_LeakyRelu()
    net.eval()
    traced, expected = get_traced_module(net, mge.tensor(net.data))
    _test_convert_result(net.data, traced, expected, max_error)
|
[
"megengine.tensor"
] |
[((2960, 3025), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['normal', 'group', 'transpose']"], {}), "('mode', ['normal', 'group', 'transpose'])\n", (2983, 3025), False, 'import pytest\n'), ((4527, 4573), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', '[True, False]'], {}), "('mode', [True, False])\n", (4550, 4573), False, 'import pytest\n'), ((5069, 5116), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'avg']"], {}), "('mode', ['max', 'avg'])\n", (5092, 5116), False, 'import pytest\n'), ((5383, 5432), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['bn1d', 'bn2d']"], {}), "('mode', ['bn1d', 'bn2d'])\n", (5406, 5432), False, 'import pytest\n'), ((6596, 6696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'max', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'max', 'pow'])\n", (6619, 6696), False, 'import pytest\n'), ((6913, 7006), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'pow'])\n", (6936, 7006), False, 'import pytest\n'), ((7293, 7421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu', 'sigmoid', 'tanh', 'leaky_relu', 'softmax', 'silu', 'relu6',\n 'hsigmoid', 'hswish']"], {}), "('mode', ['relu', 'sigmoid', 'tanh', 'leaky_relu',\n 'softmax', 'silu', 'relu6', 'hsigmoid', 'hswish'])\n", (7316, 7421), False, 'import pytest\n'), ((7776, 7817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu']"], {}), "('mode', ['relu'])\n", (7799, 7817), False, 'import pytest\n'), ((8040, 8095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'sum', 'mean']"], {}), "('mode', ['max', 'sum', 'mean'])\n", (8063, 8095), False, 'import 
pytest\n'), ((9321, 9463), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', "['shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'resnet18', 'resnet50',\n 'resnet101', 'resnext50_32x4d']"], {}), "('model', ['shufflenet_v2_x0_5',\n 'shufflenet_v2_x1_0', 'resnet18', 'resnet50', 'resnet101',\n 'resnext50_32x4d'])\n", (9344, 9463), False, 'import pytest\n'), ((1375, 1745), 'mgeconvert.converters.tm_to_caffe.tracedmodule_to_caffe', 'tracedmodule_to_caffe', (['trace_module'], {'prototxt': "(tmp_file + '.txt')", 'caffemodel': "(tmp_file + '.caffemodel')", 'input_data_type': 'input_data_type', 'input_scales': 'input_scales', 'input_zero_points': 'input_zero_points', 'require_quantize': 'require_quantize', 'param_fake_quant': 'param_fake_quant', 'split_conv_relu': 'split_conv_relu', 'fuse_bn': 'fuse_bn', 'convert_backend': 'convert_backend'}), "(trace_module, prototxt=tmp_file + '.txt', caffemodel=\n tmp_file + '.caffemodel', input_data_type=input_data_type, input_scales\n =input_scales, input_zero_points=input_zero_points, require_quantize=\n require_quantize, param_fake_quant=param_fake_quant, split_conv_relu=\n split_conv_relu, fuse_bn=fuse_bn, convert_backend=convert_backend)\n", (1396, 1745), False, 'from mgeconvert.converters.tm_to_caffe import tracedmodule_to_caffe\n'), ((1838, 1904), 'caffe.Net', 'caffe.Net', (["(tmp_file + '.txt')", "(tmp_file + '.caffemodel')", 'caffe.TEST'], {}), "(tmp_file + '.txt', tmp_file + '.caffemodel', caffe.TEST)\n", (1847, 1904), False, 'import caffe\n'), ((3059, 3072), 'test.utils.ConvOpr', 'ConvOpr', (['mode'], {}), '(mode)\n', (3066, 3072), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3248, 3263), 'test.utils.ConvRelu2dOpr', 
'ConvRelu2dOpr', ([], {}), '()\n', (3261, 3263), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3443, 3456), 'test.utils.ConvBn2dOpr', 'ConvBn2dOpr', ([], {}), '()\n', (3454, 3456), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3655, 3672), 'test.utils.ConvBnRelu2dOpr', 'ConvBnRelu2dOpr', ([], {}), '()\n', (3670, 3672), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((3867, 3878), 'test.utils.LinearOpr', 'LinearOpr', ([], {}), '()\n', (3876, 3878), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4060, 4080), 'test.utils.LinearOpr', 'LinearOpr', (['"""flatten"""'], {}), "('flatten')\n", (4069, 4080), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, 
LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4278, 4291), 'test.utils.LinearBnOpr', 'LinearBnOpr', ([], {}), '()\n', (4289, 4291), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4610, 4630), 'test.utils.MatrixMulBnOpr', 'MatrixMulBnOpr', (['mode'], {}), '(mode)\n', (4624, 4630), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((4895, 4907), 'test.utils.SqueezeOpr', 'SqueezeOpr', ([], {}), '()\n', (4905, 4907), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5224, 5237), 'test.utils.PoolOpr', 'PoolOpr', (['mode'], {}), '(mode)\n', (5231, 5237), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5469, 5480), 'test.utils.BnOpr', 'BnOpr', (['mode'], {}), '(mode)\n', (5474, 5480), False, 'from test.utils import ActiveOpr, 
AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5718, 5732), 'test.utils.SubtensorOpr', 'SubtensorOpr', ([], {}), '()\n', (5730, 5732), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((5909, 5923), 'test.utils.TransposeOpr', 'TransposeOpr', ([], {}), '()\n', (5921, 5923), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((6097, 6109), 'test.utils.FConcatOpr', 'FConcatOpr', ([], {}), '()\n', (6107, 6109), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((6438, 6450), 'test.utils.ReshapeOpr', 'ReshapeOpr', ([], {}), '()\n', (6448, 6450), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, 
XORNet_LeakyRelu\n'), ((6734, 6751), 'test.utils.ElemwiseOpr', 'ElemwiseOpr', (['mode'], {}), '(mode)\n', (6745, 6751), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7054, 7071), 'test.utils.ElemwiseOpr', 'ElemwiseOpr', (['mode'], {}), '(mode)\n', (7065, 7071), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7615, 7630), 'test.utils.ActiveOpr', 'ActiveOpr', (['mode'], {}), '(mode)\n', (7624, 7630), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((7860, 7875), 'test.utils.ActiveOpr', 'ActiveOpr', (['mode'], {}), '(mode)\n', (7869, 7875), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8129, 8144), 'test.utils.ReduceOpr', 'ReduceOpr', (['mode'], {}), '(mode)\n', (8138, 8144), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, 
ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8337, 8351), 'test.utils.BroadcastOpr', 'BroadcastOpr', ([], {}), '()\n', (8349, 8351), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8525, 8536), 'test.utils.RepeatOpr', 'RepeatOpr', ([], {}), '()\n', (8534, 8536), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8711, 8723), 'test.utils.FlattenOpr', 'FlattenOpr', ([], {}), '()\n', (8721, 8723), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((8917, 8929), 'test.utils.DropoutOpr', 'DropoutOpr', ([], {}), '()\n', (8927, 8929), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((9134, 9156), 'test.utils.AdaptiveAvgPool2dOpr', 
'AdaptiveAvgPool2dOpr', ([], {}), '()\n', (9154, 9156), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((10166, 10174), 'test.utils.XORNet', 'XORNet', ([], {}), '()\n', (10172, 10174), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((10427, 10445), 'test.utils.XORNet_LeakyRelu', 'XORNet_LeakyRelu', ([], {}), '()\n', (10443, 10445), False, 'from test.utils import ActiveOpr, AdaptiveAvgPool2dOpr, BnOpr, BroadcastOpr, ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, DropoutOpr, ElemwiseOpr, FConcatOpr, FlattenOpr, LinearBnOpr, LinearOpr, MatrixMulBnOpr, PoolOpr, ReduceOpr, RepeatOpr, ReshapeOpr, SqueezeOpr, SubtensorOpr, TransposeOpr, XORNet, XORNet_LeakyRelu\n'), ((2852, 2939), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['caffe_results', 'mge_results'], {'rtol': 'max_err', 'atol': 'max_err'}), '(caffe_results, mge_results, rtol=max_err, atol=\n max_err)\n', (2878, 2939), True, 'import numpy as np\n'), ((3124, 3144), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3134, 3144), True, 'import megengine as mge\n'), ((3318, 3338), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3328, 3338), True, 'import megengine as mge\n'), ((3526, 3546), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3536, 3546), True, 'import megengine as mge\n'), ((3742, 3762), 'megengine.tensor', 'mge.tensor', (['net.data'], 
{}), '(net.data)\n', (3752, 3762), True, 'import megengine as mge\n'), ((3930, 3950), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (3940, 3950), True, 'import megengine as mge\n'), ((4132, 4153), 'megengine.tensor', 'mge.tensor', (['net.data1'], {}), '(net.data1)\n', (4142, 4153), True, 'import megengine as mge\n'), ((4424, 4444), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4434, 4444), True, 'import megengine as mge\n'), ((4763, 4783), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4773, 4783), True, 'import megengine as mge\n'), ((4959, 4979), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4969, 4979), True, 'import megengine as mge\n'), ((5289, 5309), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5299, 5309), True, 'import megengine as mge\n'), ((5601, 5617), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (5611, 5617), True, 'import megengine as mge\n'), ((5784, 5804), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5794, 5804), True, 'import megengine as mge\n'), ((5975, 5995), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (5985, 5995), True, 'import megengine as mge\n'), ((6188, 6204), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (6198, 6204), True, 'import megengine as mge\n'), ((6206, 6222), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (6216, 6222), True, 'import megengine as mge\n'), ((6502, 6522), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (6512, 6522), True, 'import megengine as mge\n'), ((6803, 6823), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (6813, 6823), True, 'import megengine as mge\n'), ((7218, 7233), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (7226, 7233), True, 'import numpy as np\n'), ((7682, 7702), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), 
'(net.data)\n', (7692, 7702), True, 'import megengine as mge\n'), ((7927, 7947), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (7937, 7947), True, 'import megengine as mge\n'), ((8196, 8216), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8206, 8216), True, 'import megengine as mge\n'), ((8403, 8423), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8413, 8423), True, 'import megengine as mge\n'), ((8588, 8608), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8598, 8608), True, 'import megengine as mge\n'), ((8775, 8795), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8785, 8795), True, 'import megengine as mge\n'), ((8981, 9001), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (8991, 9001), True, 'import megengine as mge\n'), ((9208, 9228), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (9218, 9228), True, 'import megengine as mge\n'), ((10002, 10018), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (10012, 10018), True, 'import megengine as mge\n'), ((10241, 10261), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (10251, 10261), True, 'import megengine as mge\n'), ((10512, 10532), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (10522, 10532), True, 'import megengine as mge\n'), ((2615, 2701), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['out_dict[name._name]', 'mge_results[name]'], {'atol': 'max_err'}), '(out_dict[name._name], mge_results[name], atol=\n max_err)\n', (2641, 2701), True, 'import numpy as np\n'), ((6121, 6151), 'numpy.random.random', 'np.random.random', (['(1, 2, 4, 5)'], {}), '((1, 2, 4, 5))\n', (6137, 6151), True, 'import numpy as np\n'), ((4328, 4348), 'megengine.tensor', 'mge.tensor', (['net.data'], {}), '(net.data)\n', (4338, 4348), True, 'import megengine as mge\n'), ((4667, 4687), 'megengine.tensor', 
'mge.tensor', (['net.data'], {}), '(net.data)\n', (4677, 4687), True, 'import megengine as mge\n'), ((7143, 7158), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (7151, 7158), True, 'import numpy as np\n'), ((9566, 9606), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3 * 224 * 224)'], {}), '(0, 255, 3 * 224 * 224)\n', (9583, 9606), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
import megengine.functional as F
from megengine import tensor
from megengine.test import assertTensorClose
def test_onehot_low_dimension():
    """one_hot on a 1-D int tensor must match rows of an identity matrix."""
    indices = np.arange(1, 4, dtype=np.int32)
    result = F.one_hot(tensor(indices))
    assertTensorClose(result.numpy(), np.eye(4, dtype=np.int32)[indices])
def test_onehot_high_dimension():
    """one_hot on a 2-D index tensor equals fancy-indexing an identity matrix."""
    indices = np.array(
        [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]], dtype=np.int32
    )
    result = F.one_hot(tensor(indices), 10)
    assertTensorClose(result.numpy(), np.eye(10, dtype=np.int32)[indices])
|
[
"megengine.functional.one_hot",
"megengine.tensor"
] |
[((236, 250), 'megengine.functional.one_hot', 'F.one_hot', (['inp'], {}), '(inp)\n', (245, 250), True, 'import megengine.functional as F\n'), ((407, 501), 'numpy.array', 'np.array', (['[[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]]'], {'dtype': 'np.int32'}), '([[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],\n dtype=np.int32)\n', (415, 501), True, 'import numpy as np\n'), ((523, 534), 'megengine.tensor', 'tensor', (['arr'], {}), '(arr)\n', (529, 534), False, 'from megengine import tensor\n'), ((545, 563), 'megengine.functional.one_hot', 'F.one_hot', (['inp', '(10)'], {}), '(inp, 10)\n', (554, 563), True, 'import megengine.functional as F\n'), ((193, 224), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (202, 224), True, 'import numpy as np\n'), ((296, 321), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (302, 321), True, 'import numpy as np\n'), ((322, 353), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'np.int32'}), '(1, 4, dtype=np.int32)\n', (331, 353), True, 'import numpy as np\n'), ((600, 626), 'numpy.eye', 'np.eye', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (606, 626), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import caffe # pylint: disable=import-error
import megengine
import megengine.hub
import numpy as np
import pytest
from mgeconvert.caffe_converter import convert_to_caffe
from .utils import (
ActiveOpr,
BnOpr,
BroadcastOpr,
ConcatOpr,
ConvOpr,
ElemwiseOpr,
LinearOpr,
PoolOpr,
ReduceOpr,
ReshapeOpr,
SoftmaxOpr,
SqueezeOpr,
SubtensorOpr,
TransposeOpr,
XORNet,
dump_mge_model,
)
max_error = 1e-6  # absolute tolerance when comparing caffe vs MegEngine outputs
tmp_file = "test_model"  # basename for temporary .mge / .txt / .caffemodel dumps
def _test_convert_result(inputs, fpath, mge_results, max_err):
    """Convert a dumped MegEngine model to caffe and compare forward outputs.

    Args:
        inputs: ndarray fed into the caffe network's "data" blob.
        fpath: path (without the ``.mge`` extension) of the dumped model.
        mge_results: reference output produced by MegEngine.
        max_err: absolute tolerance for the element-wise comparison.
    """
    convert_to_caffe(
        fpath + ".mge", prototxt=tmp_file + ".txt", caffemodel=tmp_file + ".caffemodel"
    )
    # BUG FIX: load the caffemodel via ``tmp_file`` instead of the hard-coded
    # "test_model.caffemodel", so the helper keeps working if tmp_file changes.
    caffe_net = caffe.Net(tmp_file + ".txt", tmp_file + ".caffemodel", caffe.TEST)
    # Feed the input into the first blob whose name contains "data".
    for blob_name in caffe_net.blobs.keys():
        if "data" in blob_name:
            caffe_net.blobs[blob_name].data[...] = inputs
            break
    caffe_net.forward()
    # The last blob holds the network output.
    caffe_results = list(caffe_net.blobs.items())[-1][1].data
    assert caffe_results.shape == mge_results.shape
    assert np.allclose(caffe_results, mge_results, atol=max_err)
@pytest.mark.parametrize("mode", ["normal", "group", "transpose"])
def test_conv2d(mode):
    """Round-trip a convolution model through the caffe converter."""
    opr = ConvOpr(mode)
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_linear():
    """Round-trip a fully-connected model through the caffe converter."""
    opr = LinearOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_softmax():
    """Round-trip a softmax model through the caffe converter."""
    opr = SoftmaxOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_squeeze():
    """Round-trip a squeeze (dimension-removal) model through the converter."""
    opr = SqueezeOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize("mode", ["max", "avg"])
def test_pooling(mode):
    """Round-trip a pooling model; avg pooling is skipped on MegEngine > 0.6.0."""
    if mode == "avg" and megengine.__version__ > "0.6.0":
        return
    opr = PoolOpr(mode)
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize("mode", ["bn1d", "bn2d"])
def test_batchnorm(mode):
    """Round-trip 1-D and 2-D batch-norm models through the converter."""
    opr = BnOpr(mode)
    inp = opr.data1 if mode == "bn1d" else opr.data2
    expected = dump_mge_model(opr, inp, tmp_file)
    _test_convert_result(inp, tmp_file, expected, max_error)
def test_subtensor():
    """Round-trip a slicing (subtensor) model through the converter."""
    opr = SubtensorOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_transopse():  # NOTE(review): name typo for "transpose"; kept for compatibility
    """Round-trip a transpose model through the converter."""
    opr = TransposeOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_concat():
    """Round-trip a concat model through the converter."""
    opr = ConcatOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_reshape():
    """Round-trip a reshape model through the converter."""
    opr = ReshapeOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize(
    "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "max", "pow"]
)
def test_elemwise(mode):
    """Round-trip element-wise operator models through the converter."""
    opr = ElemwiseOpr(mode)
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize(
    "mode", ["add", "sub", "mul", "div", "abs", "exp", "log", "pow"]
)
def test_elemwise_broadcast(mode):
    """Element-wise ops fed a scalar input to exercise broadcasting."""
    opr = ElemwiseOpr(mode)
    scalar = np.array([2.0])
    expected = dump_mge_model(opr, scalar.astype("float32"), tmp_file)
    _test_convert_result(scalar, tmp_file, expected, max_error)
@pytest.mark.parametrize("mode", ["relu", "sigmoid", "tanh", "leaky_relu"])
def test_active(mode):
    """Round-trip activation-function models through the converter."""
    opr = ActiveOpr(mode)
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize("mode", ["max", "sum", "mean"])
def test_reduce(mode):
    """Round-trip reduction-operator models through the converter."""
    opr = ReduceOpr(mode)
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
def test_broadcast():
    """Round-trip a broadcast model through the converter."""
    opr = BroadcastOpr()
    expected = dump_mge_model(opr, opr.data, tmp_file)
    _test_convert_result(opr.data, tmp_file, expected, max_error)
@pytest.mark.parametrize(
    "model",
    [
        "shufflenet_v2_x0_5",
        "shufflenet_v2_x1_0",
        "resnet18",
        "resnet50",
        "resnet101",
        "resnext50_32x4d",
    ],
)
def test_model(model):
    """End-to-end conversion of pretrained hub models (looser 1e-2 tolerance)."""
    data = (
        np.random.randint(0, 255, 3 * 224 * 224)
        .reshape((1, 3, 224, 224))
        .astype(np.float32)
    )
    # Older MegEngine releases need a pinned model-zoo commit.
    commit_id = (
        "dc2f2cfb228a135747d083517b98aea56e7aab92"
        if megengine.__version__ < "1.1.0"
        else None
    )
    net = megengine.hub.load(
        "megengine/models", model, use_cache=False, commit=commit_id, pretrained=True
    )
    expected = dump_mge_model(net, data, tmp_file)
    _test_convert_result(data, tmp_file, expected, 1e-2)
def test_xornet():
    """Round-trip the XOR toy network (trained dump) through the converter."""
    if megengine.__version__ < "1.1.0":
        return
    model = XORNet()
    expected = dump_mge_model(model, model.data, tmp_file, True)
    _test_convert_result(model.data, tmp_file, expected, max_error)
def test_leakyrelu_model():
    """Round-trip the XOR network dumped without training mode.

    NOTE(review): despite the name this instantiates ``XORNet``, not a
    leaky-relu variant — confirm whether ``XORNet_LeakyRelu`` was intended.
    """
    if megengine.__version__ < "1.1.0":
        return
    model = XORNet()
    expected = dump_mge_model(model, model.data, tmp_file, False)
    _test_convert_result(model.data, tmp_file, expected, max_error)
|
[
"megengine.hub.load"
] |
[((1489, 1554), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['normal', 'group', 'transpose']"], {}), "('mode', ['normal', 'group', 'transpose'])\n", (1512, 1554), False, 'import pytest\n'), ((2238, 2285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'avg']"], {}), "('mode', ['max', 'avg'])\n", (2261, 2285), False, 'import pytest\n'), ((2535, 2584), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['bn1d', 'bn2d']"], {}), "('mode', ['bn1d', 'bn2d'])\n", (2558, 2584), False, 'import pytest\n'), ((3493, 3593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'max', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'max', 'pow'])\n", (3516, 3593), False, 'import pytest\n'), ((3777, 3870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['add', 'sub', 'mul', 'div', 'abs', 'exp', 'log', 'pow']"], {}), "('mode', ['add', 'sub', 'mul', 'div', 'abs', 'exp',\n 'log', 'pow'])\n", (3800, 3870), False, 'import pytest\n'), ((4096, 4170), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['relu', 'sigmoid', 'tanh', 'leaky_relu']"], {}), "('mode', ['relu', 'sigmoid', 'tanh', 'leaky_relu'])\n", (4119, 4170), False, 'import pytest\n'), ((4348, 4403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['max', 'sum', 'mean']"], {}), "('mode', ['max', 'sum', 'mean'])\n", (4371, 4403), False, 'import pytest\n'), ((4755, 4897), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', "['shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'resnet18', 'resnet50',\n 'resnet101', 'resnext50_32x4d']"], {}), "('model', ['shufflenet_v2_x0_5',\n 'shufflenet_v2_x1_0', 'resnet18', 'resnet50', 'resnet101',\n 'resnext50_32x4d'])\n", (4778, 4897), False, 'import pytest\n'), ((932, 1034), 'mgeconvert.caffe_converter.convert_to_caffe', 'convert_to_caffe', 
(["(fpath + '.mge')"], {'prototxt': "(tmp_file + '.txt')", 'caffemodel': "(tmp_file + '.caffemodel')"}), "(fpath + '.mge', prototxt=tmp_file + '.txt', caffemodel=\n tmp_file + '.caffemodel')\n", (948, 1034), False, 'from mgeconvert.caffe_converter import convert_to_caffe\n'), ((1060, 1125), 'caffe.Net', 'caffe.Net', (["(tmp_file + '.txt')", '"""test_model.caffemodel"""', 'caffe.TEST'], {}), "(tmp_file + '.txt', 'test_model.caffemodel', caffe.TEST)\n", (1069, 1125), False, 'import caffe\n'), ((1432, 1485), 'numpy.allclose', 'np.allclose', (['caffe_results', 'mge_results'], {'atol': 'max_err'}), '(caffe_results, mge_results, atol=max_err)\n', (1443, 1485), True, 'import numpy as np\n'), ((5258, 5360), 'megengine.hub.load', 'megengine.hub.load', (['"""megengine/models"""', 'model'], {'use_cache': '(False)', 'commit': 'commit_id', 'pretrained': '(True)'}), "('megengine/models', model, use_cache=False, commit=\n commit_id, pretrained=True)\n", (5276, 5360), False, 'import megengine\n'), ((4043, 4058), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (4051, 4058), True, 'import numpy as np\n'), ((3973, 3988), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (3981, 3988), True, 'import numpy as np\n'), ((5000, 5040), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(3 * 224 * 224)'], {}), '(0, 255, 3 * 224 * 224)\n', (5017, 5040), True, 'import numpy as np\n')]
|
import megengine as mge
import megengine.functional as F
import numpy as np
def bilinear_sampler(img, coords, mode="bilinear", mask=False):
    """Wrapper for grid_sample, uses pixel coordinates.

    NOTE(review): ``mode`` is accepted but never forwarded to ``F.remap``,
    so remap's default interpolation is always used — confirm intended.
    """
    H, W = img.shape[-2:]
    img = F.remap(img, coords, border_mode="constant")
    if mask:
        xs = coords[:, :, :, 0:1]
        ys = coords[:, :, :, 1:2]
        out_of_bounds = (xs < 0) | (xs > W - 1) | (ys < 0) | (ys > H - 1)
        valid = F.logical_not(out_of_bounds)
        return img, valid.astype("float32")
    return img
def coords_grid(batch, ht, wd):
    """Build a (batch, 2, ht, wd) tensor of (x, y) pixel coordinates."""
    xs, ys = np.meshgrid(np.arange(wd), np.arange(ht))
    ys = mge.tensor(ys, dtype="float32")
    xs = mge.tensor(xs, dtype="float32")
    grid = F.expand_dims(F.stack([xs, ys], axis=0), axis=0)
    return F.repeat(grid, batch, axis=0)
def manual_pad(x, pady, padx):
    """Replicate-pad the last two axes of ``x`` by pady/padx on each side."""
    if pady > 0:
        top = F.repeat(x[:, :, 0:1, :], pady, axis=2)
        bottom = F.repeat(x[:, :, -1:, :], pady, axis=2)
        x = F.concat([top, x, bottom], axis=2)
    if padx > 0:
        left = F.repeat(x[:, :, :, 0:1], padx, axis=3)
        right = F.repeat(x[:, :, :, -1:], padx, axis=3)
        x = F.concat([left, x, right], axis=3)
    return x
|
[
"megengine.functional.remap",
"megengine.functional.stack",
"megengine.tensor",
"megengine.functional.expand_dims",
"megengine.functional.concat",
"megengine.functional.repeat",
"megengine.functional.logical_not"
] |
[((237, 281), 'megengine.functional.remap', 'F.remap', (['img', 'coords'], {'border_mode': '"""constant"""'}), "(img, coords, border_mode='constant')\n", (244, 281), True, 'import megengine.functional as F\n'), ((805, 838), 'megengine.functional.stack', 'F.stack', (['[x_grid, y_grid]'], {'axis': '(0)'}), '([x_grid, y_grid], axis=0)\n', (812, 838), True, 'import megengine.functional as F\n'), ((508, 527), 'megengine.functional.logical_not', 'F.logical_not', (['mask'], {}), '(mask)\n', (521, 527), True, 'import megengine.functional as F\n'), ((654, 667), 'numpy.arange', 'np.arange', (['wd'], {}), '(wd)\n', (663, 667), True, 'import numpy as np\n'), ((669, 682), 'numpy.arange', 'np.arange', (['ht'], {}), '(ht)\n', (678, 682), True, 'import numpy as np\n'), ((705, 740), 'megengine.tensor', 'mge.tensor', (['y_grid'], {'dtype': '"""float32"""'}), "(y_grid, dtype='float32')\n", (715, 740), True, 'import megengine as mge\n'), ((742, 777), 'megengine.tensor', 'mge.tensor', (['x_grid'], {'dtype': '"""float32"""'}), "(x_grid, dtype='float32')\n", (752, 777), True, 'import megengine as mge\n'), ((861, 890), 'megengine.functional.expand_dims', 'F.expand_dims', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (874, 890), True, 'import megengine.functional as F\n'), ((987, 1026), 'megengine.functional.repeat', 'F.repeat', (['x[:, :, 0:1, :]', 'pady'], {'axis': '(2)'}), '(x[:, :, 0:1, :], pady, axis=2)\n', (995, 1026), True, 'import megengine.functional as F\n'), ((1039, 1078), 'megengine.functional.repeat', 'F.repeat', (['x[:, :, -1:, :]', 'pady'], {'axis': '(2)'}), '(x[:, :, -1:, :], pady, axis=2)\n', (1047, 1078), True, 'import megengine.functional as F\n'), ((1091, 1118), 'megengine.functional.concat', 'F.concat', (['[u, x, d]'], {'axis': '(2)'}), '([u, x, d], axis=2)\n', (1099, 1118), True, 'import megengine.functional as F\n'), ((1148, 1187), 'megengine.functional.repeat', 'F.repeat', (['x[:, :, :, 0:1]', 'padx'], {'axis': '(3)'}), '(x[:, :, :, 0:1], padx, axis=3)\n', 
(1156, 1187), True, 'import megengine.functional as F\n'), ((1200, 1239), 'megengine.functional.repeat', 'F.repeat', (['x[:, :, :, -1:]', 'padx'], {'axis': '(3)'}), '(x[:, :, :, -1:], padx, axis=3)\n', (1208, 1239), True, 'import megengine.functional as F\n'), ((1252, 1279), 'megengine.functional.concat', 'F.concat', (['[l, x, r]'], {'axis': '(3)'}), '([l, x, r], axis=3)\n', (1260, 1279), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform
import time
import numpy as np
import pytest
from megengine.data.collator import Collator
from megengine.data.dataloader import DataLoader
from megengine.data.dataset import ArrayDataset, StreamDataset
from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler
from megengine.data.transform import (
Compose,
Normalize,
PseudoTransform,
ToMode,
Transform,
)
def init_dataset():
    """Create a 100-sample ArrayDataset of random uint8 images and int labels."""
    sample_num = 100
    images = np.random.randint(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)
    labels = np.random.randint(0, 10, size=(sample_num,), dtype=int)
    return ArrayDataset(images, labels)
def test_dataloader_init():
    """Invalid constructor combinations raise; defaults and lengths are sane."""
    dataset = init_dataset()
    # Each of these keyword combinations is rejected by the constructor.
    invalid_kwargs = (
        dict(num_workers=2, divide=True),
        dict(num_workers=-1),
        dict(timeout=-1),
        dict(num_workers=0, divide=True),
    )
    for kwargs in invalid_kwargs:
        with pytest.raises(ValueError):
            DataLoader(dataset, **kwargs)
    loader = DataLoader(dataset)
    assert isinstance(loader.sampler, SequentialSampler)
    assert isinstance(loader.transform, PseudoTransform)
    assert isinstance(loader.collator, Collator)
    # 100 samples / batch 6: 17 batches keeping the remainder, 16 dropping it.
    keep = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=False)
    )
    assert len(keep) == 17
    drop = DataLoader(
        dataset, sampler=RandomSampler(dataset, batch_size=6, drop_last=True)
    )
    assert len(drop) == 16
class MyStream(StreamDataset):
    """Stream dataset emitting ``number`` random samples for dataloader tests.

    Args:
        number: how many samples the stream yields.
        batch: if True, yield batched samples of shape (2, 2, 2, 3).
        error_foramt: if True, yield a malformed tuple (without the leading
            batch flag) to exercise format validation.  (Misspelled name is
            kept for caller compatibility.)
        block: if True, sleep before each sample to trigger timeouts.
    """

    def __init__(self, number, batch=False, error_foramt=False, block=False):
        self.number = number
        self.batch = batch
        self.error_format = error_foramt
        self.block = block

    def __iter__(self):
        for cnt in range(self.number):
            if self.block:
                for _ in range(10):
                    time.sleep(1)
            if self.batch:
                data = np.random.randint(0, 256, (2, 2, 2, 3), dtype="uint8")
                yield (True, (data, [cnt, cnt - self.number]))
            else:
                data = np.random.randint(0, 256, (2, 2, 3), dtype="uint8")
                if self.error_format:
                    yield (data, cnt)
                else:
                    yield (False, (data, cnt))
        # BUG FIX: the original ended with ``raise StopIteration``, which PEP
        # 479 (Python 3.7+) converts into a RuntimeError inside a generator.
        # A plain return ends the generator cleanly.
        return
@pytest.mark.parametrize("batch", [True, False])
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader(batch, num_workers):
    """First 10 batches have the expected shapes and pairwise-distinct labels."""
    transform = Compose(
        [Normalize(mean=(103, 116, 123), std=(57, 57, 58)), ToMode("CHW")]
    )
    loader = DataLoader(
        MyStream(100, batch=batch),
        StreamSampler(batch_size=4),
        transform,
        num_workers=num_workers,
    )
    seen = set()
    for step, (images, labels) in enumerate(loader):
        if step == 10:
            break
        assert images.shape == (4, 3, 2, 2)
        assert labels.shape == (4,)
        for label in labels:
            assert label not in seen
            seen.add(label)
def test_stream_dataloader_error():
    """A stream yielding malformed tuples must trigger an assertion error."""
    loader = DataLoader(
        MyStream(100, error_foramt=True), StreamSampler(batch_size=4)
    )
    with pytest.raises(AssertionError, match=r".*tuple.*"):
        next(iter(loader))
@pytest.mark.parametrize("num_workers", [0, 2])
def test_stream_dataloader_timeout(num_workers):
    """A blocking stream must hit the dataloader's 2 s timeout."""
    loader = DataLoader(
        MyStream(100, False, block=True),
        StreamSampler(batch_size=4),
        num_workers=num_workers,
        timeout=2,
    )
    with pytest.raises(RuntimeError, match=r".*timeout.*"):
        next(iter(loader))
def test_dataloader_serial():
    """Single-process loading yields correctly shaped (data, label) batches."""
    dataset = init_dataset()
    sampler = RandomSampler(dataset, batch_size=4, drop_last=False)
    for images, labels in DataLoader(dataset, sampler=sampler):
        assert images.shape == (4, 1, 32, 32)
        assert labels.shape == (4,)
def test_dataloader_parallel():
    """Two-worker loading yields correct batches with and without divide."""
    # Cap the plasma shared-memory store at 100 MB.
    os.environ["MGE_PLASMA_MEMORY"] = "100000000"
    dataset = init_dataset()
    for divide in (False, True):
        loader = DataLoader(
            dataset,
            sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
            num_workers=2,
            divide=divide,
        )
        for images, labels in loader:
            assert images.shape == (4, 1, 32, 32)
            assert labels.shape == (4,)
@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_timeout():
    """A transform slower than the timeout must raise RuntimeError."""
    dataset = init_dataset()

    class SlowTransform(Transform):
        def __init__(self):
            pass

        def apply(self, input):
            time.sleep(10)  # far longer than the 2 s timeout below
            return input

    loader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        transform=SlowTransform(),
        num_workers=2,
        timeout=2,
    )
    with pytest.raises(RuntimeError, match=r".*timeout.*"):
        next(iter(loader))
@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="dataloader do not support parallel on windows",
)
def test_dataloader_parallel_worker_exception():
    """An exception raised inside a worker surfaces as a 'worker died' error."""
    dataset = init_dataset()

    class FakeErrorTransform(Transform):
        def __init__(self):
            pass

        def apply(self, input):
            # BUG FIX: removed the unreachable ``return input`` that
            # followed this raise in the original.
            raise RuntimeError("test raise error")

    dataloader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
        transform=FakeErrorTransform(),
        num_workers=2,
    )
    with pytest.raises(RuntimeError, match=r"worker.*died"):
        data_iter = iter(dataloader)
        batch_data = next(data_iter)
def _multi_instances_parallel_dataloader_worker():
    """Exercise interleaved train and validation DataLoader instances."""
    dataset = init_dataset()
    for divide_flag in [True, False]:
        train_loader = DataLoader(
            dataset,
            sampler=RandomSampler(dataset, batch_size=4, drop_last=False),
            num_workers=2,
            divide=divide_flag,
        )
        val_loader = DataLoader(
            dataset,
            sampler=RandomSampler(dataset, batch_size=10, drop_last=False),
            num_workers=2,
            divide=divide_flag,
        )
        for idx, (images, labels) in enumerate(train_loader):
            assert images.shape == (4, 1, 32, 32)
            assert labels.shape == (4,)
            # Periodically drain the validation loader mid-training.
            if idx % 5 == 0:
                for val_images, val_labels in val_loader:
                    assert val_images.shape == (10, 1, 32, 32)
                    assert val_labels.shape == (10,)
def test_dataloader_parallel_multi_instances():
    """Run the multi-instance scenario in the current process."""
    os.environ["MGE_PLASMA_MEMORY"] = "100000000"  # cap shared memory at 100 MB
    _multi_instances_parallel_dataloader_worker()
@pytest.mark.isolated_distributed
def test_dataloader_parallel_multi_instances_multiprocessing():
    """Run the multi-instance scenario in four separate processes."""
    os.environ["MGE_PLASMA_MEMORY"] = "100000000"  # cap shared memory at 100 MB
    import multiprocessing as mp

    # mp.set_start_method("spawn")
    workers = [
        mp.Process(target=_multi_instances_parallel_dataloader_worker)
        for _ in range(4)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
        assert worker.exitcode == 0
@pytest.mark.parametrize("num_workers", [0, 2])
def test_timeout_event(num_workers):
    """On timeout the user-supplied callback provides a substitute batch."""

    def fallback():
        return (True, (np.zeros(shape=(2, 2, 2, 3)), np.ones(shape=(2,))))

    loader = DataLoader(
        MyStream(100, block=True),
        StreamSampler(batch_size=4),
        num_workers=num_workers,
        timeout=2,
        timeout_event=fallback,
    )
    for _, batch in enumerate(loader):
        np.testing.assert_equal(batch[0], np.zeros(shape=(4, 2, 2, 3)))
        np.testing.assert_equal(batch[1], np.ones(shape=(4,)))
        break
|
[
"megengine.data.sampler.RandomSampler",
"megengine.data.dataset.ArrayDataset",
"megengine.data.dataloader.DataLoader",
"megengine.data.sampler.StreamSampler",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode"
] |
[((2861, 2908), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch"""', '[True, False]'], {}), "('batch', [True, False])\n", (2884, 2908), False, 'import pytest\n'), ((2910, 2956), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (2933, 2956), False, 'import pytest\n'), ((3857, 3903), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (3880, 3903), False, 'import pytest\n'), ((8231, 8277), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_workers"""', '[0, 2]'], {}), "('num_workers', [0, 2])\n", (8254, 8277), False, 'import pytest\n'), ((866, 937), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(sample_num, 1, 32, 32)', 'dtype': 'np.uint8'}), '(0, 255, size=(sample_num, 1, 32, 32), dtype=np.uint8)\n', (883, 937), True, 'import numpy as np\n'), ((950, 1005), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(sample_num,)', 'dtype': 'int'}), '(0, 10, size=(sample_num,), dtype=int)\n', (967, 1005), True, 'import numpy as np\n'), ((1020, 1050), 'megengine.data.dataset.ArrayDataset', 'ArrayDataset', (['rand_data', 'label'], {}), '(rand_data, label)\n', (1032, 1050), False, 'from megengine.data.dataset import ArrayDataset, StreamDataset\n'), ((1539, 1558), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {}), '(dataset)\n', (1549, 1558), False, 'from megengine.data.dataloader import DataLoader\n'), ((3060, 3087), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (3073, 3087), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((3659, 3686), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (3672, 3686), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, 
StreamSampler\n'), ((3704, 3732), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {}), '(dataset, sampler)\n', (3714, 3732), False, 'from megengine.data.dataloader import DataLoader\n'), ((4014, 4041), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (4027, 4041), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4060, 4124), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {'num_workers': 'num_workers', 'timeout': '(2)'}), '(dataset, sampler, num_workers=num_workers, timeout=2)\n', (4070, 4124), False, 'from megengine.data.dataloader import DataLoader\n'), ((8459, 8486), 'megengine.data.sampler.StreamSampler', 'StreamSampler', ([], {'batch_size': '(4)'}), '(batch_size=4)\n', (8472, 8486), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8505, 8591), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'sampler'], {'num_workers': 'num_workers', 'timeout': '(2)', 'timeout_event': 'cb'}), '(dataset, sampler, num_workers=num_workers, timeout=2,\n timeout_event=cb)\n', (8515, 8591), False, 'from megengine.data.dataloader import DataLoader\n'), ((1138, 1163), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1151, 1163), False, 'import pytest\n'), ((1186, 1233), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(2)', 'divide': '(True)'}), '(dataset, num_workers=2, divide=True)\n', (1196, 1233), False, 'from megengine.data.dataloader import DataLoader\n'), ((1243, 1268), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1256, 1268), False, 'import pytest\n'), ((1291, 1326), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(-1)'}), '(dataset, num_workers=-1)\n', (1301, 1326), False, 'from megengine.data.dataloader import 
DataLoader\n'), ((1336, 1361), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1349, 1361), False, 'import pytest\n'), ((1384, 1415), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'timeout': '(-1)'}), '(dataset, timeout=-1)\n', (1394, 1415), False, 'from megengine.data.dataloader import DataLoader\n'), ((1425, 1450), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1438, 1450), False, 'import pytest\n'), ((1473, 1520), 'megengine.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'num_workers': '(0)', 'divide': '(True)'}), '(dataset, num_workers=0, divide=True)\n', (1483, 1520), False, 'from megengine.data.dataloader import DataLoader\n'), ((3742, 3790), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': '""".*tuple.*"""'}), "(AssertionError, match='.*tuple.*')\n", (3755, 3790), False, 'import pytest\n'), ((4134, 4182), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '""".*timeout.*"""'}), "(RuntimeError, match='.*timeout.*')\n", (4147, 4182), False, 'import pytest\n'), ((5828, 5876), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '""".*timeout.*"""'}), "(RuntimeError, match='.*timeout.*')\n", (5841, 5876), False, 'import pytest\n'), ((5282, 5299), 'platform.system', 'platform.system', ([], {}), '()\n', (5297, 5299), False, 'import platform\n'), ((6544, 6593), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""worker.*died"""'}), "(RuntimeError, match='worker.*died')\n", (6557, 6593), False, 'import pytest\n'), ((5980, 5997), 'platform.system', 'platform.system', ([], {}), '()\n', (5995, 5997), False, 'import platform\n'), ((8046, 8108), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_multi_instances_parallel_dataloader_worker'}), '(target=_multi_instances_parallel_dataloader_worker)\n', (8056, 8108), True, 'import multiprocessing as mp\n'), ((1789, 1842), 'megengine.data.sampler.RandomSampler', 'RandomSampler', 
(['dataset'], {'batch_size': '(6)', 'drop_last': '(False)'}), '(dataset, batch_size=6, drop_last=False)\n', (1802, 1842), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((1936, 1988), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(6)', 'drop_last': '(True)'}), '(dataset, batch_size=6, drop_last=True)\n', (1949, 1988), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4361, 4414), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (4374, 4414), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((4749, 4802), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (4762, 4802), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((5034, 5087), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (5047, 5087), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((5575, 5589), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (5585, 5589), False, 'import time\n'), ((5678, 5731), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (5691, 5731), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((6411, 6464), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (6424, 6464), False, 'from 
megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8685, 8713), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 2, 2, 3)'}), '(shape=(4, 2, 2, 3))\n', (8693, 8713), True, 'import numpy as np\n'), ((8756, 8775), 'numpy.ones', 'np.ones', ([], {'shape': '(4,)'}), '(shape=(4,))\n', (8763, 8775), True, 'import numpy as np\n'), ((2474, 2528), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(2, 2, 2, 3)'], {'dtype': '"""uint8"""'}), "(0, 256, (2, 2, 2, 3), dtype='uint8')\n", (2491, 2528), True, 'import numpy as np\n'), ((2633, 2684), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(2, 2, 3)'], {'dtype': '"""uint8"""'}), "(0, 256, (2, 2, 3), dtype='uint8')\n", (2650, 2684), True, 'import numpy as np\n'), ((3168, 3217), 'megengine.data.transform.Normalize', 'Normalize', ([], {'mean': '(103, 116, 123)', 'std': '(57, 57, 58)'}), '(mean=(103, 116, 123), std=(57, 57, 58))\n', (3177, 3217), False, 'from megengine.data.transform import Compose, Normalize, PseudoTransform, ToMode, Transform\n'), ((3219, 3232), 'megengine.data.transform.ToMode', 'ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3225, 3232), False, 'from megengine.data.transform import Compose, Normalize, PseudoTransform, ToMode, Transform\n'), ((6871, 6924), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(4)', 'drop_last': '(False)'}), '(dataset, batch_size=4, drop_last=False)\n', (6884, 6924), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((7073, 7127), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': '(10)', 'drop_last': '(False)'}), '(dataset, batch_size=10, drop_last=False)\n', (7086, 7127), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler, StreamSampler\n'), ((8352, 8380), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 3)'}), '(shape=(2, 2, 2, 3))\n', (8360, 8380), True, 'import numpy as 
np\n'), ((8382, 8401), 'numpy.ones', 'np.ones', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (8389, 8401), True, 'import numpy as np\n'), ((2410, 2423), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2420, 2423), False, 'import time\n')]
|
import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
import megengine.module as Float
import megengine.module.qat as QAT
import megengine.module.quantized as Q
from megengine.core.tensor import dtype
from megengine.quantization import min_max_fakequant_qconfig
from megengine.quantization.quantize import (
disable_fake_quant,
disable_observer,
propagate_qconfig,
)
"""
Calculate testing scales based on ``min_max_fakequant_qconfig``
"""
inp_scale = np.float32(np.random.rand() + 1)
min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
def quant(x, scale):
    """Cast *x* to a qint8 tensor quantized with the given *scale*."""
    return x.astype(dtype.qint8(scale))
def fake_quant(x, scale, qmin=-128, qmax=127):
    """Simulate quantization error in floating point.

    The input is divided by *scale*, rounded, clipped into ``[qmin, qmax]``
    and rescaled, so the result carries the rounding/clipping error of real
    quantization while remaining a float tensor.

    Args:
        x (Tensor): input tensor.
        scale (float): quantization scale.
        qmin (int): lower clipping bound. Default: -128 (int8 range).
        qmax (int): upper clipping bound. Default: 127 (int8 range).

    Returns:
        Tensor: fake-quantized tensor with the same shape as *x*.
    """
    x = x / scale
    x = F.round(x)
    # generalized from the hard-coded int8 bounds; defaults keep the
    # original behavior
    x = F.clip(x, qmin, qmax)
    x = x * scale
    return x
def init_qat_net(net):
    """Seed the observers of a QAT module with the module-level test
    statistics (``min_val``/``max_val``; index 0: weight, index 1: activation).
    """
    targets = []
    if net.with_weight:
        targets.append((net.weight_observer, 0))
    if net.with_act:
        targets.append((net.act_observer, 1))
    for observer, idx in targets:
        observer.min_val.set_value(min_val[idx])
        observer.max_val.set_value(max_val[idx])
def test_quant_stub():
    """Check that Float/QAT/Quantized QuantStub variants agree."""
    float_net = Float.QuantStub()
    float_net.eval()

    # converted QAT module with observer and fake-quant disabled:
    # must reproduce the float module exactly
    converted = QAT.QuantStub.from_float_module(float_net)
    converted.eval()
    disable_observer(converted)
    disable_fake_quant(converted)

    # stand-alone QAT module with manually seeded observer statistics
    qat_net = QAT.QuantStub()
    qat_net.eval()
    disable_observer(qat_net)
    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    q_net = Q.QuantStub.from_qat_module(qat_net)
    q_net.eval()

    inputs = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))

    expected = float_net(inputs)
    no_fakequant_out = converted(inputs)
    expected_fakequant = fake_quant(float_net(inputs), act_scale)
    qat_out = qat_net(inputs)
    quantized_out = q_net(inputs).numpy() * act_scale

    np.testing.assert_allclose(no_fakequant_out, expected)
    np.testing.assert_allclose(qat_out, expected_fakequant)
    np.testing.assert_allclose(quantized_out, expected_fakequant.numpy())
def test_dequant_stub():
    """Check that Float/QAT/Quantized DequantStub variants agree."""
    float_net = Float.DequantStub()
    float_net.eval()

    # converted QAT module with fake-quant and observer disabled:
    # must reproduce the float module exactly
    converted = QAT.DequantStub.from_float_module(float_net)
    converted.eval()
    disable_fake_quant(converted)
    disable_observer(converted)

    # stand-alone QAT module with manually seeded observer statistics
    qat_net = QAT.DequantStub()
    qat_net.eval()
    disable_observer(qat_net)
    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    q_net = Q.DequantStub.from_qat_module(qat_net)
    q_net.eval()

    inputs = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    inputs = fake_quant(inputs, inp_scale)
    inputs.q_dict["scale"] = inp_scale

    expected = float_net(inputs)
    no_fakequant_out = converted(inputs)
    # dequant output is float, so no activation fake-quant is applied here
    expected_fakequant = float_net(inputs)
    qat_out = qat_net(inputs)
    quantized_out = q_net(quant(inputs, inp_scale)).numpy()

    np.testing.assert_allclose(no_fakequant_out, expected)
    np.testing.assert_allclose(qat_out, expected_fakequant)
    np.testing.assert_allclose(quantized_out, expected_fakequant.numpy())
@pytest.mark.parametrize("kind", ["COS", "RELU", "ADD", "MUL", "FUSE_ADD_RELU"])
def test_elemwise(kind):
normal_net = Float.Elemwise(kind)
normal_net.eval()
qat_from_float = QAT.Elemwise.from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
qat_net = QAT.Elemwise(kind)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
q_net = Q.Elemwise.from_qat_module(qat_net)
q_net.eval()
x1_scale = np.float32(np.random.rand() + 1)
x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x1 = fake_quant(x1, x1_scale)
x1.q_dict["scale"] = x1_scale
x2_scale = np.float32(np.random.rand() + 1)
x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
x2 = fake_quant(x2, x2_scale)
x2.q_dict["scale"] = x2_scale
x1_int8 = quant(x1, x1_scale)
x2_int8 = quant(x2, x2_scale)
# test correctness of `Float`, `QAT` and `Quantized`
if kind in ("ADD", "MUL", "FUSE_ADD_RELU"):
normal = normal_net(x1, x2)
qat_without_fakequant = qat_from_float(x1, x2)
fake_quant_normal = fake_quant(normal_net(x1, x2), act_scale)
qat = qat_net(x1, x2)
q = q_net(x1_int8, x2_int8).numpy() * act_scale
else:
normal = normal_net(x1)
qat_without_fakequant = qat_from_float(x1)
fake_quant_normal = fake_quant(normal_net(x1), act_scale)
qat = qat_net(x1)
q = q_net(x1_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
def test_linear():
    """Check that Float/QAT/Quantized Linear variants agree."""
    float_net = Float.Linear(3, 3, bias=True)
    float_net.eval()

    qat_net = QAT.Linear(3, 3, bias=True)
    qat_net.eval()
    disable_observer(qat_net)
    propagate_qconfig(qat_net, min_max_fakequant_qconfig)
    init_qat_net(qat_net)

    inputs = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
    inputs = fake_quant(inputs, inp_scale)
    inputs.q_dict["scale"] = inp_scale
    inputs_int8 = quant(inputs, inp_scale)

    weight = np.random.normal(size=(3, 3)).astype("float32")
    bias = np.random.normal(size=(3,)).astype("float32")
    # the float reference carries already-fake-quantized parameters so
    # its output matches the quantized pipelines exactly
    float_net.weight.set_value(fake_quant(weight, weight_scale))
    float_net.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
    qat_net.weight.set_value(weight)
    qat_net.bias.set_value(bias)

    # converted QAT module with fake-quant and observer disabled:
    # must reproduce the float module exactly
    converted = QAT.Linear.from_float_module(float_net)
    converted.eval()
    disable_fake_quant(converted)
    disable_observer(converted)

    q_net = Q.Linear.from_qat_module(qat_net)
    q_net.eval()

    expected = float_net(inputs)
    no_fakequant_out = converted(inputs)
    expected_fakequant = fake_quant(float_net(inputs), act_scale)
    qat_out = qat_net(inputs)
    quantized_out = q_net(inputs_int8).numpy() * act_scale

    np.testing.assert_allclose(no_fakequant_out, expected)
    np.testing.assert_allclose(qat_out, expected_fakequant)
    np.testing.assert_allclose(quantized_out, expected_fakequant.numpy())
@pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
def test_conv(module):
normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
normal_net.eval()
qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
qat_net.eval()
disable_observer(qat_net)
propagate_qconfig(qat_net, min_max_fakequant_qconfig)
init_qat_net(qat_net)
x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
x = fake_quant(x, inp_scale)
x.q_dict["scale"] = inp_scale
x_int8 = quant(x, inp_scale)
weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")
if module in ("ConvBn2d", "ConvBnRelu2d"):
normal_net.conv.weight.set_value(fake_quant(weight, weight_scale))
normal_net.conv.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
qat_net.conv.weight.set_value(weight)
qat_net.conv.bias.set_value(bias)
else:
normal_net.weight.set_value(fake_quant(weight, weight_scale))
normal_net.bias.set_value(fake_quant(bias, inp_scale * weight_scale))
qat_net.weight.set_value(weight)
qat_net.bias.set_value(bias)
qat_from_float = getattr(QAT, module).from_float_module(normal_net)
qat_from_float.eval()
disable_observer(qat_from_float)
disable_fake_quant(qat_from_float)
q_net = getattr(Q, module).from_qat_module(qat_net)
q_net.eval()
normal = normal_net(x)
qat_without_fakequant = qat_from_float(x)
fake_quant_normal = fake_quant(normal_net(x), act_scale)
qat = qat_net(x)
q = q_net(x_int8).numpy() * act_scale
np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-6)
np.testing.assert_allclose(qat, fake_quant_normal)
np.testing.assert_allclose(q, fake_quant_normal.numpy())
|
[
"megengine.functional.round",
"megengine.module.quantized.Linear.from_qat_module",
"megengine.module.qat.Elemwise.from_float_module",
"megengine.module.qat.DequantStub.from_float_module",
"megengine.module.QuantStub",
"megengine.quantization.quantize.propagate_qconfig",
"megengine.module.Elemwise",
"megengine.module.qat.DequantStub",
"megengine.module.qat.QuantStub",
"megengine.module.quantized.Elemwise.from_qat_module",
"megengine.module.qat.QuantStub.from_float_module",
"megengine.module.quantized.DequantStub.from_qat_module",
"megengine.module.qat.Elemwise",
"megengine.core.tensor.dtype.qint8",
"megengine.module.qat.Linear",
"megengine.module.qat.Linear.from_float_module",
"megengine.quantization.quantize.disable_observer",
"megengine.quantization.quantize.disable_fake_quant",
"megengine.functional.clip",
"megengine.module.DequantStub",
"megengine.module.Linear",
"megengine.module.quantized.QuantStub.from_qat_module"
] |
[((3223, 3302), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['COS', 'RELU', 'ADD', 'MUL', 'FUSE_ADD_RELU']"], {}), "('kind', ['COS', 'RELU', 'ADD', 'MUL', 'FUSE_ADD_RELU'])\n", (3246, 3302), False, 'import pytest\n'), ((6367, 6440), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""module"""', "['Conv2d', 'ConvBn2d', 'ConvBnRelu2d']"], {}), "('module', ['Conv2d', 'ConvBn2d', 'ConvBnRelu2d'])\n", (6390, 6440), False, 'import pytest\n'), ((845, 863), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['scale'], {}), '(scale)\n', (856, 863), False, 'from megengine.core.tensor import dtype\n'), ((949, 959), 'megengine.functional.round', 'F.round', (['x'], {}), '(x)\n', (956, 959), True, 'import megengine.functional as F\n'), ((968, 988), 'megengine.functional.clip', 'F.clip', (['x', '(-128)', '(127)'], {}), '(x, -128, 127)\n', (974, 988), True, 'import megengine.functional as F\n'), ((1358, 1375), 'megengine.module.QuantStub', 'Float.QuantStub', ([], {}), '()\n', (1373, 1375), True, 'import megengine.module as Float\n'), ((1420, 1463), 'megengine.module.qat.QuantStub.from_float_module', 'QAT.QuantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (1451, 1463), True, 'import megengine.module.qat as QAT\n'), ((1494, 1526), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (1510, 1526), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1531, 1565), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (1549, 1565), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1581, 1596), 'megengine.module.qat.QuantStub', 'QAT.QuantStub', ([], {}), '()\n', (1594, 1596), True, 'import megengine.module.qat as QAT\n'), ((1620, 1645), 
'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (1636, 1645), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1651, 1704), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (1668, 1704), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((1744, 1780), 'megengine.module.quantized.QuantStub.from_qat_module', 'Q.QuantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (1771, 1780), True, 'import megengine.module.quantized as Q\n'), ((2064, 2121), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (2090, 2121), True, 'import numpy as np\n'), ((2126, 2176), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (2152, 2176), True, 'import numpy as np\n'), ((2282, 2301), 'megengine.module.DequantStub', 'Float.DequantStub', ([], {}), '()\n', (2299, 2301), True, 'import megengine.module as Float\n'), ((2346, 2391), 'megengine.module.qat.DequantStub.from_float_module', 'QAT.DequantStub.from_float_module', (['normal_net'], {}), '(normal_net)\n', (2379, 2391), True, 'import megengine.module.qat as QAT\n'), ((2422, 2456), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (2440, 2456), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2461, 2493), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (2477, 2493), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, 
propagate_qconfig\n'), ((2509, 2526), 'megengine.module.qat.DequantStub', 'QAT.DequantStub', ([], {}), '()\n', (2524, 2526), True, 'import megengine.module.qat as QAT\n'), ((2550, 2575), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (2566, 2575), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2581, 2634), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (2598, 2634), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((2674, 2712), 'megengine.module.quantized.DequantStub.from_qat_module', 'Q.DequantStub.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (2703, 2712), True, 'import megengine.module.quantized as Q\n'), ((3046, 3103), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (3072, 3103), True, 'import numpy as np\n'), ((3108, 3158), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (3134, 3158), True, 'import numpy as np\n'), ((3345, 3365), 'megengine.module.Elemwise', 'Float.Elemwise', (['kind'], {}), '(kind)\n', (3359, 3365), True, 'import megengine.module as Float\n'), ((3410, 3452), 'megengine.module.qat.Elemwise.from_float_module', 'QAT.Elemwise.from_float_module', (['normal_net'], {}), '(normal_net)\n', (3440, 3452), True, 'import megengine.module.qat as QAT\n'), ((3483, 3515), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (3499, 3515), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3520, 3554), 
'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (3538, 3554), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3570, 3588), 'megengine.module.qat.Elemwise', 'QAT.Elemwise', (['kind'], {}), '(kind)\n', (3582, 3588), True, 'import megengine.module.qat as QAT\n'), ((3612, 3637), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (3628, 3637), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3643, 3696), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (3660, 3696), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((3736, 3771), 'megengine.module.quantized.Elemwise.from_qat_module', 'Q.Elemwise.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (3762, 3771), True, 'import megengine.module.quantized as Q\n'), ((4819, 4876), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (4845, 4876), True, 'import numpy as np\n'), ((4881, 4931), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (4907, 4931), True, 'import numpy as np\n'), ((5031, 5060), 'megengine.module.Linear', 'Float.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (5043, 5060), True, 'import megengine.module as Float\n'), ((5098, 5125), 'megengine.module.qat.Linear', 'QAT.Linear', (['(3)', '(3)'], {'bias': '(True)'}), '(3, 3, bias=True)\n', (5108, 5125), True, 'import megengine.module.qat as QAT\n'), ((5149, 5174), 'megengine.quantization.quantize.disable_observer', 'disable_observer', 
(['qat_net'], {}), '(qat_net)\n', (5165, 5174), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5180, 5233), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (5197, 5233), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5781, 5821), 'megengine.module.qat.Linear.from_float_module', 'QAT.Linear.from_float_module', (['normal_net'], {}), '(normal_net)\n', (5809, 5821), True, 'import megengine.module.qat as QAT\n'), ((5852, 5886), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (5870, 5886), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5891, 5923), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (5907, 5923), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((5937, 5970), 'megengine.module.quantized.Linear.from_qat_module', 'Q.Linear.from_qat_module', (['qat_net'], {}), '(qat_net)\n', (5961, 5970), True, 'import megengine.module.quantized as Q\n'), ((6190, 6247), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {}), '(qat_without_fakequant, normal)\n', (6216, 6247), True, 'import numpy as np\n'), ((6252, 6302), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (6278, 6302), True, 'import numpy as np\n'), ((6643, 6668), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_net'], {}), '(qat_net)\n', (6659, 6668), False, 'from megengine.quantization.quantize import disable_fake_quant, 
disable_observer, propagate_qconfig\n'), ((6674, 6727), 'megengine.quantization.quantize.propagate_qconfig', 'propagate_qconfig', (['qat_net', 'min_max_fakequant_qconfig'], {}), '(qat_net, min_max_fakequant_qconfig)\n', (6691, 6727), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((7695, 7727), 'megengine.quantization.quantize.disable_observer', 'disable_observer', (['qat_from_float'], {}), '(qat_from_float)\n', (7711, 7727), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((7732, 7766), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_from_float'], {}), '(qat_from_float)\n', (7750, 7766), False, 'from megengine.quantization.quantize import disable_fake_quant, disable_observer, propagate_qconfig\n'), ((8043, 8112), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat_without_fakequant', 'normal'], {'atol': '(1e-06)'}), '(qat_without_fakequant, normal, atol=1e-06)\n', (8069, 8112), True, 'import numpy as np\n'), ((8116, 8166), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['qat', 'fake_quant_normal'], {}), '(qat, fake_quant_normal)\n', (8142, 8166), True, 'import numpy as np\n'), ((513, 529), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (527, 529), True, 'import numpy as np\n'), ((546, 583), 'numpy.random.randint', 'np.random.randint', (['(-127)', '(0)'], {'size': '(2,)'}), '(-127, 0, size=(2,))\n', (563, 583), True, 'import numpy as np\n'), ((612, 648), 'numpy.random.randint', 'np.random.randint', (['(1)', '(127)'], {'size': '(2,)'}), '(1, 127, size=(2,))\n', (629, 648), True, 'import numpy as np\n'), ((693, 726), 'numpy.max', 'np.max', (['[-min_val[0], max_val[0]]'], {}), '([-min_val[0], max_val[0]])\n', (699, 726), True, 'import numpy as np\n'), ((761, 794), 'numpy.max', 'np.max', (['[-min_val[1], max_val[1]]'], {}), '([-min_val[1], max_val[1]])\n', (767, 
794), True, 'import numpy as np\n'), ((3816, 3832), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3830, 3832), True, 'import numpy as np\n'), ((4002, 4018), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4016, 4018), True, 'import numpy as np\n'), ((5444, 5473), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5460, 5473), True, 'import numpy as np\n'), ((5503, 5530), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3,)'}), '(size=(3,))\n', (5519, 5530), True, 'import numpy as np\n'), ((6944, 6979), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3, 3, 3)'}), '(size=(3, 3, 3, 3))\n', (6960, 6979), True, 'import numpy as np\n'), ((7009, 7044), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 1, 1)'}), '(size=(1, 3, 1, 1))\n', (7025, 7044), True, 'import numpy as np\n'), ((1818, 1847), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (1834, 1847), True, 'import numpy as np\n'), ((2750, 2779), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (2766, 2779), True, 'import numpy as np\n'), ((3858, 3887), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (3874, 3887), True, 'import numpy as np\n'), ((4044, 4073), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (4060, 4073), True, 'import numpy as np\n'), ((5280, 5309), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 3)'}), '(size=(3, 3))\n', (5296, 5309), True, 'import numpy as np\n'), ((6774, 6809), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 3, 3, 3)'}), '(size=(1, 3, 3, 3))\n', (6790, 6809), True, 'import numpy as np\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
"""do the evaluation work with single gpu
"""
import argparse
import os
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.functional as F
import numpy as np
from tqdm.auto import tqdm
from recognition.datasets import get_eval_dataset
from recognition.models import FaceRecognitionModel
from recognition.tools.utils import load_config_from_path
logger = mge.get_logger(__name__)
def get_inference_func(configs):
    """Load a checkpoint and build an image-to-embedding inference function.

    Args:
        configs (dict): configuration, required fields include:
            base_dir: base directory of experiment outputs
            evaluate_epoch: model of evaluate_epoch to evaluate

    Raises:
        FileNotFoundError: model of given epoch is not found

    Returns:
        inference_func (function): inference function mapping image to embedding
    """
    model = FaceRecognitionModel(configs)
    evaluate_epoch = configs["evaluate_epoch"]
    checkpoint_path = os.path.join(configs["base_dir"], f"epoch-{evaluate_epoch}-checkpoint.pkl")
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"{checkpoint_path} not found!!!")
    checkpoint_data = mge.load(checkpoint_path)
    model.load_state_dict(checkpoint_data["state_dict"], strict=False)

    def inference_func(images):
        model.eval()
        # classic test-time mirror augment: sum the embeddings of the
        # image and its horizontal flip, then L2-normalize
        mirrored = images[:, :, :, ::-1]
        embedding = model.forward_embedding_only(images) + model.forward_embedding_only(mirrored)
        return F.normalize(embedding, axis=1)

    return inference_func
def extract_feature_and_clean_noise(configs, inference_func):
    """Extract features for facescrub/megaface and clean annotated noise.

    The noise cleaning algorithm is proposed in
    `"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
    please refer to https://github.com/deepinsight/insightface/blob/master/Evaluation/Megaface/remove_noises.py for
    more detail. This implementation does basically the same thing as the above, but with much higher speed.
    Extracted features are also saved to ``base_dir`` as .npy files.

    Args:
        configs (dict): configuration, required fields include:
            batch_size: inference batch size
            feature_dim: model output feature dimension
            base_dir: base directory of experiment outputs
            dataset_dir: directory of dataset root
        inference_func (function): constructed inference function
    Returns:
        facescrub_feature (np.array): noise-cleaned feature of facescrub (shape: n * (feature_dim + 1))
        facescrub_label (np.array): label of facescrub (shape: n)
        megaface_feature (np.array): noise-cleaned feature of megaface (shape: m * (feature_dim + 1))
    """
    def prepare_dataset(name):
        """Build dataset and dataloader for *name*.

        Args:
            name (str): name of the dataset, should be one of {facescrub, megaface}
        Returns:
            dataset (data.Dataset): required dataset
            queue (data.DataLoader): corresponding dataloader
        """
        preprocess = T.Compose([T.Normalize(mean=127.5, std=128), T.ToMode("CHW")])
        dataset = get_eval_dataset(name, dataset_dir=configs["dataset_dir"])
        sampler = data.SequentialSampler(dataset, batch_size=configs["batch_size"])
        queue = data.DataLoader(dataset, sampler=sampler, transform=preprocess)
        return dataset, queue
    def extract_vanilla_feature(n, data_queue):
        """Extract features without any postprocessing.

        Args:
            n (int): size of dataset
            data_queue (data.DataLoader): dataloader to extract feature
        Returns:
            feature_store (np.array): extracted feature (shape: n * feature_dim)
            label (np.array): label of this instance, -1 if unknown (shape: n)
            is_noise (np.array): whether this instance is a noise (shape: n)
        """
        feature_store = np.zeros((n, configs["feature_dim"]), dtype="float32")
        label_store = np.zeros(n, dtype="int32")
        is_noise_store = np.zeros(n, dtype="bool")
        # `indice` gives the global positions of this batch, so results are
        # scattered directly into the pre-allocated stores
        for images, indice, labels, is_noise in tqdm(data_queue):
            images = mge.tensor(images, dtype="float32")
            embedding = inference_func(images)
            embedding = embedding.numpy()
            feature_store[indice] = embedding
            label_store[indice] = labels
            is_noise_store[indice] = is_noise
        return feature_store, label_store, is_noise_store
    # prepare facescrub dataset
    logger.info("preparing facescrub dataset...")
    facescrub_dataset, facescrub_queue = prepare_dataset("facescrub")
    # extract facescrub feature
    logger.info("extracting facescrub...")
    facescrub_feature_store, facescrub_label, facescrub_is_noise = extract_vanilla_feature(
        n=len(facescrub_dataset), data_queue=facescrub_queue
    )
    # prepare megaface dataset
    logger.info("preparing megaface dataset...")
    megaface_dataset, megaface_queue = prepare_dataset("megaface")
    # extract feature for megaface
    logger.info("extracting megaface...")
    megaface_feature_store, _, megaface_is_noise = extract_vanilla_feature(
        n=len(megaface_dataset), data_queue=megaface_queue
    )
    # parse facescrub noise, replace noisy feature with class center of same person
    # NOTE(review): if a class consists entirely of noise, `center` is all
    # zeros and the division by its norm yields NaN — confirm this cannot
    # happen with the real dataset
    facescrub_feature_center = np.zeros((facescrub_dataset.num_class, configs["feature_dim"]), dtype="float32")
    for i in range(facescrub_dataset.num_class):
        mask = (facescrub_label == i) & (~facescrub_is_noise)
        center = facescrub_feature_store[mask].sum(axis=0)
        center = center / np.linalg.norm(center)
        facescrub_feature_center[i] = center
    for index in np.where(facescrub_is_noise)[0]:
        center = facescrub_feature_center[facescrub_label[index]]
        disturb = np.random.uniform(-1e-5, 1e-5, (configs["feature_dim"],))
        feat = center + disturb  # avoid identical features with minor disturb
        feat = feat / np.linalg.norm(feat)
        facescrub_feature_store[index] = feat
    # extend feature by 1 dimension
    # the extended feature is infinitly large (100) if and only if megaface noise, 0 otherwise
    # so, the distance between probe and a noisy distractor is infinitly large, while other distances remain unchanged
    facescrub_feature_extend = np.zeros((len(facescrub_dataset), 1), dtype="float32")
    facescrub_feature = np.concatenate([facescrub_feature_store, facescrub_feature_extend], axis=1)
    megaface_feature_extend = megaface_is_noise.astype("float32").reshape(-1, 1) * 100
    megaface_feature = np.concatenate([megaface_feature_store, megaface_feature_extend], axis=1)
    # write to file system for later inspection / re-use
    facescrub_feature_path = os.path.join(configs["base_dir"], "facescrub.npy")
    np.save(facescrub_feature_path, facescrub_feature)
    facescrub_label_path = os.path.join(configs["base_dir"], "facescrub_label.npy")
    np.save(facescrub_label_path, facescrub_label)
    megaface_feature_path = os.path.join(configs["base_dir"], "megaface.npy")
    np.save(megaface_feature_path, megaface_feature)
    return facescrub_feature, facescrub_label, megaface_feature
def calculate_score(configs, facescrub, labels, megaface):
    """Compute the MegaFace identification top-1 score.

    Strictly follows the protocol of "The MegaFace Benchmark: 1 Million Faces
    for Recognition at Scale" <https://arxiv.org/pdf/1512.00596.pdf>; matches
    the official dev-sdk output while running much faster.

    Args:
        configs (dict): configuration
        facescrub (np.array): facescrub (probe) features
        labels (np.array): facescrub identity labels
        megaface (np.array): megaface (distractor) features

    Returns:
        megaface_score (float): megaface top-1 score
    """
    probe_feats = mge.tensor(facescrub, dtype="float32")
    distractor_feats = mge.tensor(megaface, dtype="float32")

    # pairwise squared l2 distances between probes, via
    # (x - y) ** 2 = x ** 2 + y ** 2 - 2 * x * y
    sq_norm = (probe_feats ** 2).sum(axis=-1, keepdims=True)
    probe_pair_dist = (
        sq_norm
        + sq_norm.transpose(1, 0)
        - 2 * F.matmul(probe_feats, probe_feats.transpose(1, 0))
    ).numpy()

    def min_distractor_dist(x):
        # squared distance from one probe to every distractor, reduced to min
        d = (x ** 2).sum(axis=-1) + (distractor_feats ** 2).sum(axis=-1) - 2 * (x * distractor_feats).sum(axis=-1)
        return d.min()

    n_probe = len(probe_feats)
    hit, total = 0, 0
    for i in tqdm(range(n_probe)):
        closest_distractor = min_distractor_dist(probe_feats[i]).numpy()
        # all other probes sharing identity i
        same_id = (labels == labels[i]) & (np.arange(n_probe) != i)
        for j in np.where(same_id)[0]:
            # top-1 hit iff the genuine pair beats the closest distractor
            hit += probe_pair_dist[i][j] < closest_distractor
            total += 1
    return hit / total * 100
def main(args):
    """Run the full MegaFace evaluation for one experiment configuration."""
    configs = load_config_from_path(args.config_file)
    if args.epoch is not None:
        configs["evaluate_epoch"] = args.epoch
    else:
        configs["evaluate_epoch"] = configs["num_epoch"]
    # route megengine logs into <base_dir>/worklog.txt
    base_dir = configs["base_dir"]
    os.makedirs(base_dir, exist_ok=True)
    mge.set_log_file(os.path.join(base_dir, "worklog.txt"))
    inference_func = get_inference_func(configs)
    facescrub_feature, facescrub_label, megaface_feature = extract_feature_and_clean_noise(
        configs, inference_func
    )
    megaface_score = calculate_score(
        configs, facescrub_feature, facescrub_label, megaface_feature
    )
    logger.info("Epoch: %d", configs["evaluate_epoch"])
    logger.info("MegaFace Top1: %.2f", megaface_score)
if __name__ == "__main__":
    # command-line entry point
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "-f", "--config-file", required=True,
        help="path to experiment configuration",
    )
    cli.add_argument(
        "-e", "--epoch", type=int, default=None,
        help="model of num epoch to evaluate (default: num_epoch)",
    )
    main(cli.parse_args())
|
[
"megengine.functional.normalize",
"megengine.data.DataLoader",
"megengine.load",
"megengine.tensor",
"megengine.data.SequentialSampler",
"megengine.get_logger",
"megengine.set_log_file",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode"
] |
[((464, 488), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (478, 488), True, 'import megengine as mge\n'), ((967, 996), 'recognition.models.FaceRecognitionModel', 'FaceRecognitionModel', (['configs'], {}), '(configs)\n', (987, 996), False, 'from recognition.models import FaceRecognitionModel\n'), ((1066, 1141), 'os.path.join', 'os.path.join', (["configs['base_dir']", 'f"""epoch-{evaluate_epoch}-checkpoint.pkl"""'], {}), "(configs['base_dir'], f'epoch-{evaluate_epoch}-checkpoint.pkl')\n", (1078, 1141), False, 'import os\n'), ((1149, 1180), 'os.path.exists', 'os.path.exists', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1163, 1180), False, 'import os\n'), ((5515, 5600), 'numpy.zeros', 'np.zeros', (["(facescrub_dataset.num_class, configs['feature_dim'])"], {'dtype': '"""float32"""'}), "((facescrub_dataset.num_class, configs['feature_dim']), dtype='float32'\n )\n", (5523, 5600), True, 'import numpy as np\n'), ((6581, 6656), 'numpy.concatenate', 'np.concatenate', (['[facescrub_feature_store, facescrub_feature_extend]'], {'axis': '(1)'}), '([facescrub_feature_store, facescrub_feature_extend], axis=1)\n', (6595, 6656), True, 'import numpy as np\n'), ((6767, 6840), 'numpy.concatenate', 'np.concatenate', (['[megaface_feature_store, megaface_feature_extend]'], {'axis': '(1)'}), '([megaface_feature_store, megaface_feature_extend], axis=1)\n', (6781, 6840), True, 'import numpy as np\n'), ((6898, 6948), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""facescrub.npy"""'], {}), "(configs['base_dir'], 'facescrub.npy')\n", (6910, 6948), False, 'import os\n'), ((6953, 7003), 'numpy.save', 'np.save', (['facescrub_feature_path', 'facescrub_feature'], {}), '(facescrub_feature_path, facescrub_feature)\n', (6960, 7003), True, 'import numpy as np\n'), ((7031, 7087), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""facescrub_label.npy"""'], {}), "(configs['base_dir'], 'facescrub_label.npy')\n", (7043, 7087), False, 'import 
os\n'), ((7092, 7138), 'numpy.save', 'np.save', (['facescrub_label_path', 'facescrub_label'], {}), '(facescrub_label_path, facescrub_label)\n', (7099, 7138), True, 'import numpy as np\n'), ((7167, 7216), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""megaface.npy"""'], {}), "(configs['base_dir'], 'megaface.npy')\n", (7179, 7216), False, 'import os\n'), ((7221, 7269), 'numpy.save', 'np.save', (['megaface_feature_path', 'megaface_feature'], {}), '(megaface_feature_path, megaface_feature)\n', (7228, 7269), True, 'import numpy as np\n'), ((8020, 8058), 'megengine.tensor', 'mge.tensor', (['facescrub'], {'dtype': '"""float32"""'}), "(facescrub, dtype='float32')\n", (8030, 8058), True, 'import megengine as mge\n'), ((8074, 8111), 'megengine.tensor', 'mge.tensor', (['megaface'], {'dtype': '"""float32"""'}), "(megaface, dtype='float32')\n", (8084, 8111), True, 'import megengine as mge\n'), ((9171, 9210), 'recognition.tools.utils.load_config_from_path', 'load_config_from_path', (['args.config_file'], {}), '(args.config_file)\n', (9192, 9210), False, 'from recognition.tools.utils import load_config_from_path\n'), ((9343, 9390), 'os.makedirs', 'os.makedirs', (["configs['base_dir']"], {'exist_ok': '(True)'}), "(configs['base_dir'], exist_ok=True)\n", (9354, 9390), False, 'import os\n'), ((9410, 9458), 'os.path.join', 'os.path.join', (["configs['base_dir']", '"""worklog.txt"""'], {}), "(configs['base_dir'], 'worklog.txt')\n", (9422, 9458), False, 'import os\n'), ((9463, 9493), 'megengine.set_log_file', 'mge.set_log_file', (['worklog_path'], {}), '(worklog_path)\n', (9479, 9493), True, 'import megengine as mge\n'), ((9914, 9939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9937, 9939), False, 'import argparse\n'), ((1208, 1233), 'megengine.load', 'mge.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1216, 1233), True, 'import megengine as mge\n'), ((1702, 1732), 'megengine.functional.normalize', 'F.normalize', (['embedding'], 
{'axis': '(1)'}), '(embedding, axis=1)\n', (1713, 1732), True, 'import megengine.functional as F\n'), ((3313, 3371), 'recognition.datasets.get_eval_dataset', 'get_eval_dataset', (['name'], {'dataset_dir': "configs['dataset_dir']"}), "(name, dataset_dir=configs['dataset_dir'])\n", (3329, 3371), False, 'from recognition.datasets import get_eval_dataset\n'), ((3390, 3455), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['dataset'], {'batch_size': "configs['batch_size']"}), "(dataset, batch_size=configs['batch_size'])\n", (3412, 3455), True, 'import megengine.data as data\n'), ((3472, 3535), 'megengine.data.DataLoader', 'data.DataLoader', (['dataset'], {'sampler': 'sampler', 'transform': 'preprocess'}), '(dataset, sampler=sampler, transform=preprocess)\n', (3487, 3535), True, 'import megengine.data as data\n'), ((4085, 4139), 'numpy.zeros', 'np.zeros', (["(n, configs['feature_dim'])"], {'dtype': '"""float32"""'}), "((n, configs['feature_dim']), dtype='float32')\n", (4093, 4139), True, 'import numpy as np\n'), ((4162, 4188), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""int32"""'}), "(n, dtype='int32')\n", (4170, 4188), True, 'import numpy as np\n'), ((4214, 4239), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': '"""bool"""'}), "(n, dtype='bool')\n", (4222, 4239), True, 'import numpy as np\n'), ((4288, 4304), 'tqdm.auto.tqdm', 'tqdm', (['data_queue'], {}), '(data_queue)\n', (4292, 4304), False, 'from tqdm.auto import tqdm\n'), ((5877, 5905), 'numpy.where', 'np.where', (['facescrub_is_noise'], {}), '(facescrub_is_noise)\n', (5885, 5905), True, 'import numpy as np\n'), ((5994, 6053), 'numpy.random.uniform', 'np.random.uniform', (['(-1e-05)', '(1e-05)', "(configs['feature_dim'],)"], {}), "(-1e-05, 1e-05, (configs['feature_dim'],))\n", (6011, 6053), True, 'import numpy as np\n'), ((4327, 4362), 'megengine.tensor', 'mge.tensor', (['images'], {'dtype': '"""float32"""'}), "(images, dtype='float32')\n", (4337, 4362), True, 'import megengine as mge\n'), ((5792, 
5814), 'numpy.linalg.norm', 'np.linalg.norm', (['center'], {}), '(center)\n', (5806, 5814), True, 'import numpy as np\n'), ((6153, 6173), 'numpy.linalg.norm', 'np.linalg.norm', (['feat'], {}), '(feat)\n', (6167, 6173), True, 'import numpy as np\n'), ((8926, 8940), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (8934, 8940), True, 'import numpy as np\n'), ((3243, 3275), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(127.5)', 'std': '(128)'}), '(mean=127.5, std=128)\n', (3254, 3275), True, 'import megengine.data.transform as T\n'), ((3277, 3292), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (3285, 3292), True, 'import megengine.data.transform as T\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import numpy as np
import yaml
from megengine import jit
from megengine.module.external import ExternOprSubgraph
# "1,3,224,224" -> (1,3,224,224)
def str2tuple(x):
    """Parse a comma-separated shape string, e.g. "1,3,224,224" -> (1, 3, 224, 224)."""
    return tuple(int(token) for token in x.split(","))
def main():
    """Convert a MACE .pb model (plus its param file) into a MegEngine
    load-and-run model wrapped in an ``ExternOprSubgraph``.

    Reads the model/param binaries and a yaml config describing the
    sub-model input/output tensors, serializes them into the raw-content
    layout expected by the "mace" extern opr, then traces and dumps the
    resulting network to ``args.output``.
    """
    parser = argparse.ArgumentParser(
        description="load a .pb model and convert to corresponding "
        "load-and-run model"
    )
    parser.add_argument("input", help="mace model file")
    parser.add_argument("param", help="mace param file")
    parser.add_argument(
        "output", help="converted model that can be fed to dump_with_testcase_mge.py"
    )
    parser.add_argument("config", help="config file with yaml format")
    args = parser.parse_args()

    with open(args.config, "r") as f:
        # use SafeLoader explicitly: yaml.load() without a Loader is unsafe on
        # untrusted input, was deprecated in PyYAML 5.1 and raises TypeError
        # since PyYAML 6.0
        configs = yaml.load(f, Loader=yaml.SafeLoader)

    for model_name in configs["models"]:
        # ignore several sub models currently
        sub_model = configs["models"][model_name]["subgraphs"][0]

        # input/output shapes
        isizes = [str2tuple(x) for x in sub_model["input_shapes"]]

        # input/output names
        input_names = sub_model["input_tensors"]
        if "check_tensors" in sub_model:
            output_names = sub_model["check_tensors"]
            osizes = [str2tuple(x) for x in sub_model["check_shapes"]]
        else:
            output_names = sub_model["output_tensors"]
            osizes = [str2tuple(x) for x in sub_model["output_shapes"]]

        with open(args.input, "rb") as fin:
            raw_model = fin.read()
        with open(args.param, "rb") as fin:
            raw_param = fin.read()

        # all sizes/counts are serialized as 4-byte little-endian integers
        model_size = (len(raw_model)).to_bytes(4, byteorder="little")
        param_size = (len(raw_param)).to_bytes(4, byteorder="little")
        n_inputs = (len(input_names)).to_bytes(4, byteorder="little")
        n_outputs = (len(output_names)).to_bytes(4, byteorder="little")

        names_buffer = n_inputs + n_outputs
        for iname in input_names:
            names_buffer += (len(iname)).to_bytes(4, byteorder="little")
            names_buffer += str.encode(iname)
        for oname in output_names:
            names_buffer += (len(oname)).to_bytes(4, byteorder="little")
            names_buffer += str.encode(oname)

        shapes_buffer = n_outputs
        for oshape in osizes:
            shapes_buffer += (len(oshape)).to_bytes(4, byteorder="little")
            for oi in oshape:
                shapes_buffer += oi.to_bytes(4, byteorder="little")

        # raw content contains:
        # input/output names + output shapes + model buffer + param buffer
        wk_raw_content = (
            names_buffer
            + shapes_buffer
            + model_size
            + raw_model
            + param_size
            + raw_param
        )
        net = ExternOprSubgraph(wk_raw_content, "mace", osizes)
        net.eval()

        @jit.trace(symbolic=True)
        def inference(inputs):
            return net(inputs)

        # random inputs are only used to drive the trace; values don't matter
        inputs = [np.random.random(size).astype(np.float32) for size in isizes]
        inference.trace(*inputs)
        inference.dump(args.output)
# script entry point
if __name__ == "__main__":
    main()
|
[
"megengine.jit.trace",
"megengine.module.external.ExternOprSubgraph"
] |
[((666, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""load a .pb model and convert to corresponding load-and-run model"""'}), "(description=\n 'load a .pb model and convert to corresponding load-and-run model')\n", (689, 774), False, 'import argparse\n'), ((1185, 1197), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1194, 1197), False, 'import yaml\n'), ((3180, 3229), 'megengine.module.external.ExternOprSubgraph', 'ExternOprSubgraph', (['wk_raw_content', '"""mace"""', 'osizes'], {}), "(wk_raw_content, 'mace', osizes)\n", (3197, 3229), False, 'from megengine.module.external import ExternOprSubgraph\n'), ((3259, 3283), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3268, 3283), False, 'from megengine import jit\n'), ((3378, 3405), 'numpy.random.random', 'np.random.random', (['isizes[i]'], {}), '(isizes[i])\n', (3394, 3405), True, 'import numpy as np\n')]
|
import os
import sys
import time
import logging
from collections import namedtuple
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.autodiff as autodiff
import megengine.optimizer as optim
import yaml
from tensorboardX import SummaryWriter
from nets import Model
from dataset import CREStereoDataset
from megengine.data import DataLoader, RandomSampler, Infinite
def parse_yaml(file_path: str) -> namedtuple:
    """Load a yaml config file and expose its top-level keys as namedtuple fields."""
    with open(file_path, "rb") as fp:
        cfg: dict = yaml.safe_load(fp)
    train_args = namedtuple("train_args", cfg.keys())
    return train_args(*cfg.values())
def format_time(elapse):
    """Render an elapsed time in seconds as an ``HH:MM:SS`` string."""
    total = int(elapse)
    minutes, seconds = divmod(total % 3600, 60)
    return "{:02d}:{:02d}:{:02d}".format(total // 3600, minutes, seconds)
def ensure_dir(path):
    """Create *path* (including parents) if it does not already exist."""
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True)
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate of the first param group for this epoch.

    Piecewise-linear schedule (reads the module-level ``args``): a linear
    warm-up over the first 2% of epochs, a constant plateau until 60%,
    then a linear decay for the remainder.
    """
    warm_up = 0.02
    const_range = 0.6
    min_lr_rate = 0.05
    warm_end = args.n_total_epoch * warm_up
    const_end = args.n_total_epoch * const_range
    if epoch <= warm_end:
        # ramp from min_lr_rate * base_lr up towards base_lr
        lr = (1 - min_lr_rate) * args.base_lr / warm_end * epoch \
            + min_lr_rate * args.base_lr
    elif epoch <= const_end:
        lr = args.base_lr
    else:
        # linear decay towards min_lr_rate * base_lr at the final epoch
        lr = (min_lr_rate - 1) * args.base_lr / ((1 - const_range) * args.n_total_epoch) * epoch \
            + (1 - min_lr_rate * const_range) / (1 - const_range) * args.base_lr
    optimizer.param_groups[0]["lr"] = lr
def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8):
    """Exponentially weighted L1 loss over a sequence of flow predictions.

    Later predictions receive larger weights: prediction ``i`` of ``n`` is
    weighted by ``gamma ** (n - i - 1)``, so the final refinement dominates.
    """
    n_predictions = len(flow_preds)
    valid_mask = F.expand_dims(valid, axis=1)
    flow_loss = 0.0
    for i, pred in enumerate(flow_preds):
        weight = gamma ** (n_predictions - i - 1)
        flow_loss += weight * (valid_mask * F.abs(pred - flow_gt)).mean()
    return flow_loss
def main(args):
    """Distributed CREStereo training entry point.

    Seeds RNGs, builds the model/optimizer/infinite dataloader, optionally
    resumes from the latest checkpoint, then runs the epoch/minibatch loop.
    Rank 0 additionally owns tensorboard + worklog logging and checkpointing.

    Args:
        args: namedtuple of hyper-parameters produced by ``parse_yaml``.
    """
    # initial info
    mge.random.seed(args.seed)
    rank, world_size = dist.get_rank(), dist.get_world_size()
    mge.dtr.enable() # Dynamic tensor rematerialization for memory optimization
    # directory check
    log_model_dir = os.path.join(args.log_dir, "models")
    ensure_dir(log_model_dir)
    # model / optimizer
    model = Model(
        max_disp=args.max_disp, mixed_precision=args.mixed_precision, test_mode=False
    )
    optimizer = optim.Adam(model.parameters(), lr=0.1, betas=(0.9, 0.999))
    # gradients are all-reduced across workers only in the multi-GPU case
    dist_callbacks = None if world_size == 1 else [dist.make_allreduce_cb("mean")]
    gm = autodiff.GradManager().attach(model.parameters(), callbacks=dist_callbacks)
    scaler = mge.amp.GradScaler() if args.mixed_precision else None
    if rank == 0:
        # tensorboard
        tb_log = SummaryWriter(os.path.join(args.log_dir, "train.events"))
        # worklog
        logging.basicConfig(level=eval(args.log_level))
        worklog = logging.getLogger("train_logger")
        worklog.propagate = False
        fileHandler = logging.FileHandler(
            os.path.join(args.log_dir, "worklog.txt"), mode="a", encoding="utf8"
        )
        formatter = logging.Formatter(
            fmt="%(asctime)s %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
        )
        fileHandler.setFormatter(formatter)
        consoleHandler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            fmt="\x1b[32m%(asctime)s\x1b[0m %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
        )
        consoleHandler.setFormatter(formatter)
        worklog.handlers = [fileHandler, consoleHandler]
        # params stat
        worklog.info(f"Use {world_size} GPU(s)")
        worklog.info("Params: %s" % sum([p.size for p in model.parameters()]))
    # load pretrained model if exist
    chk_path = os.path.join(log_model_dir, "latest.mge")
    if args.loadmodel is not None:
        chk_path = args.loadmodel
    elif not os.path.exists(chk_path):
        chk_path = None
    if chk_path is not None:
        if rank == 0:
            worklog.info(f"loading model: {chk_path}")
        pretrained_dict = mge.load(chk_path, map_location="cpu")
        resume_epoch_idx = pretrained_dict["epoch"]
        resume_iters = pretrained_dict["iters"]
        model.load_state_dict(pretrained_dict["state_dict"], strict=True)
        optimizer.load_state_dict(pretrained_dict["optim_state_dict"])
        start_epoch_idx = resume_epoch_idx + 1
        start_iters = resume_iters
    else:
        start_epoch_idx = 1
        start_iters = 0
    # auxiliary
    if world_size > 1:
        # broadcast rank-0 parameters so all workers start identical
        dist.bcast_list_(model.tensors())
    # datasets
    dataset = CREStereoDataset(args.training_data_path)
    if rank == 0:
        worklog.info(f"Dataset size: {len(dataset)}")
    inf_sampler = Infinite(
        RandomSampler(
            dataset,
            batch_size=args.batch_size_single,
            drop_last=False,
            world_size=world_size,
            rank=rank,
            seed=args.seed,
        )
    )
    dataloader = DataLoader(
        dataset, sampler=inf_sampler, num_workers=0, divide=False, preload=True
    )
    # counter
    cur_iters = start_iters
    total_iters = args.minibatch_per_epoch * args.n_total_epoch
    t0 = time.perf_counter()
    for epoch_idx in range(start_epoch_idx, args.n_total_epoch + 1):
        # adjust learning rate
        epoch_total_train_loss = 0
        adjust_learning_rate(optimizer, epoch_idx)
        model.train()
        t1 = time.perf_counter()
        batch_idx = 0
        for mini_batch_data in dataloader:
            # dataloader is infinite: cap each epoch at minibatch_per_epoch steps
            if batch_idx % args.minibatch_per_epoch == 0 and batch_idx != 0:
                break
            batch_idx += 1
            cur_iters += 1
            # parse data
            left, right, gt_disp, valid_mask = (
                mini_batch_data["left"],
                mini_batch_data["right"],
                mini_batch_data["disparity"],
                mini_batch_data["mask"],
            )
            t2 = time.perf_counter()
            with gm: # GradManager
                with mge.amp.autocast(enabled=args.mixed_precision):
                    # pre-process
                    left = mge.tensor(left)
                    right = mge.tensor(right)
                    gt_disp = mge.tensor(gt_disp)
                    valid_mask = mge.tensor(valid_mask)
                    # treat disparity as x-flow with a zero y-flow channel
                    gt_disp = F.expand_dims(gt_disp, axis=1)
                    gt_flow = F.concat([gt_disp, gt_disp * 0], axis=1)
                    # forward
                    flow_predictions = model(left, right)
                    # loss & backword
                    loss = sequence_loss(
                        flow_predictions, gt_flow, valid_mask, gamma=0.8
                    )
                    if args.mixed_precision:
                        scaler.backward(gm, loss)
                    else:
                        gm.backward(loss)
                    optimizer.step().clear_grad()
            # loss stats
            loss_item = loss.item()
            epoch_total_train_loss += loss_item
            t3 = time.perf_counter()
            # terminal print log (rank 0, every 5 iterations)
            if rank == 0:
                if cur_iters % 5 == 0:
                    tdata = t2 - t1
                    time_train_passed = t3 - t0
                    time_iter_passed = t3 - t1
                    step_passed = cur_iters - start_iters
                    # ETA extrapolated from average time per step so far
                    eta = (
                        (total_iters - cur_iters)
                        / max(step_passed, 1e-7)
                        * time_train_passed
                    )
                    meta_info = list()
                    meta_info.append("{:.2g} b/s".format(1.0 / time_iter_passed))
                    meta_info.append("passed:{}".format(format_time(time_train_passed)))
                    meta_info.append("eta:{}".format(format_time(eta)))
                    meta_info.append(
                        "data_time:{:.2g}".format(tdata / time_iter_passed)
                    )
                    meta_info.append(
                        "lr:{:.5g}".format(optimizer.param_groups[0]["lr"])
                    )
                    meta_info.append(
                        "[{}/{}:{}/{}]".format(
                            epoch_idx,
                            args.n_total_epoch,
                            batch_idx,
                            args.minibatch_per_epoch,
                        )
                    )
                    loss_info = [" ==> {}:{:.4g}".format("loss", loss_item)]
                    # exp_name = ['\n' + os.path.basename(os.getcwd())]
                    info = [",".join(meta_info)] + loss_info
                    worklog.info("".join(info))
                    # minibatch loss
                    tb_log.add_scalar("train/loss_batch", loss_item, cur_iters)
                    tb_log.add_scalar(
                        "train/lr", optimizer.param_groups[0]["lr"], cur_iters
                    )
                    tb_log.flush()
            t1 = time.perf_counter()
        if rank == 0:
            # epoch loss
            tb_log.add_scalar(
                "train/loss",
                epoch_total_train_loss / args.minibatch_per_epoch,
                epoch_idx,
            )
            tb_log.flush()
            # save model params
            ckp_data = {
                "epoch": epoch_idx,
                "iters": cur_iters,
                "batch_size": args.batch_size_single * args.nr_gpus,
                "epoch_size": args.minibatch_per_epoch,
                "train_loss": epoch_total_train_loss / args.minibatch_per_epoch,
                "state_dict": model.state_dict(),
                "optim_state_dict": optimizer.state_dict(),
            }
            # "latest.mge" is overwritten every epoch; periodic named snapshots too
            mge.save(ckp_data, os.path.join(log_model_dir, "latest.mge"))
            if epoch_idx % args.model_save_freq_epoch == 0:
                save_path = os.path.join(log_model_dir, "epoch-%d.mge" % epoch_idx)
                worklog.info(f"Model params saved: {save_path}")
                mge.save(ckp_data, save_path)
    if rank == 0:
        worklog.info("Training is done, exit.")
if __name__ == "__main__":
    # train configuration
    args = parse_yaml("cfgs/train.yaml")
    # distributed training: call main directly on a single GPU,
    # otherwise fan out one process per device via the launcher
    if mge.get_device_count("gpu") == 1:
        run = main
    else:
        run = dist.launcher(main)
    run(args)
|
[
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.data.DataLoader",
"megengine.get_device_count",
"megengine.load",
"megengine.functional.abs",
"megengine.tensor",
"megengine.functional.concat",
"megengine.amp.GradScaler",
"megengine.distributed.make_allreduce_cb",
"megengine.data.RandomSampler",
"megengine.random.seed",
"megengine.dtr.enable",
"megengine.distributed.launcher",
"megengine.save",
"megengine.amp.autocast",
"megengine.functional.expand_dims",
"megengine.autodiff.GradManager"
] |
[((2048, 2074), 'megengine.random.seed', 'mge.random.seed', (['args.seed'], {}), '(args.seed)\n', (2063, 2074), True, 'import megengine as mge\n'), ((2141, 2157), 'megengine.dtr.enable', 'mge.dtr.enable', ([], {}), '()\n', (2155, 2157), True, 'import megengine as mge\n'), ((2261, 2297), 'os.path.join', 'os.path.join', (['args.log_dir', '"""models"""'], {}), "(args.log_dir, 'models')\n", (2273, 2297), False, 'import os\n'), ((2365, 2453), 'nets.Model', 'Model', ([], {'max_disp': 'args.max_disp', 'mixed_precision': 'args.mixed_precision', 'test_mode': '(False)'}), '(max_disp=args.max_disp, mixed_precision=args.mixed_precision,\n test_mode=False)\n', (2370, 2453), False, 'from nets import Model\n'), ((3852, 3893), 'os.path.join', 'os.path.join', (['log_model_dir', '"""latest.mge"""'], {}), "(log_model_dir, 'latest.mge')\n", (3864, 3893), False, 'import os\n'), ((4700, 4741), 'dataset.CREStereoDataset', 'CREStereoDataset', (['args.training_data_path'], {}), '(args.training_data_path)\n', (4716, 4741), False, 'from dataset import CREStereoDataset\n'), ((5081, 5168), 'megengine.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'inf_sampler', 'num_workers': '(0)', 'divide': '(False)', 'preload': '(True)'}), '(dataset, sampler=inf_sampler, num_workers=0, divide=False,\n preload=True)\n', (5091, 5168), False, 'from megengine.data import DataLoader, RandomSampler, Infinite\n'), ((5295, 5314), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5312, 5314), False, 'import time\n'), ((612, 629), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (626, 629), False, 'import yaml\n'), ((945, 965), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (959, 965), False, 'import os\n'), ((975, 1007), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (986, 1007), False, 'import os\n'), ((1875, 1905), 'megengine.functional.abs', 'F.abs', (['(flow_preds[i] - flow_gt)'], {}), '(flow_preds[i] - flow_gt)\n', 
(1880, 1905), True, 'import megengine.functional as F\n'), ((2098, 2113), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2111, 2113), True, 'import megengine.distributed as dist\n'), ((2115, 2136), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2134, 2136), True, 'import megengine.distributed as dist\n'), ((2720, 2740), 'megengine.amp.GradScaler', 'mge.amp.GradScaler', ([], {}), '()\n', (2738, 2740), True, 'import megengine as mge\n'), ((2984, 3017), 'logging.getLogger', 'logging.getLogger', (['"""train_logger"""'], {}), "('train_logger')\n", (3001, 3017), False, 'import logging\n'), ((3206, 3283), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""'}), "(fmt='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S')\n", (3223, 3283), False, 'import logging\n'), ((3375, 3408), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (3396, 3408), False, 'import logging\n'), ((3429, 3526), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""\x1b[32m%(asctime)s\x1b[0m %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""'}), "(fmt='\\x1b[32m%(asctime)s\\x1b[0m %(message)s', datefmt=\n '%Y/%m/%d %H:%M:%S')\n", (3446, 3526), False, 'import logging\n'), ((4160, 4198), 'megengine.load', 'mge.load', (['chk_path'], {'map_location': '"""cpu"""'}), "(chk_path, map_location='cpu')\n", (4168, 4198), True, 'import megengine as mge\n'), ((4850, 4978), 'megengine.data.RandomSampler', 'RandomSampler', (['dataset'], {'batch_size': 'args.batch_size_single', 'drop_last': '(False)', 'world_size': 'world_size', 'rank': 'rank', 'seed': 'args.seed'}), '(dataset, batch_size=args.batch_size_single, drop_last=False,\n world_size=world_size, rank=rank, seed=args.seed)\n', (4863, 4978), False, 'from megengine.data import DataLoader, RandomSampler, Infinite\n'), ((5538, 5557), 'time.perf_counter', 'time.perf_counter', ([], {}), 
'()\n', (5555, 5557), False, 'import time\n'), ((10389, 10408), 'megengine.distributed.launcher', 'dist.launcher', (['main'], {}), '(main)\n', (10402, 10408), True, 'import megengine.distributed as dist\n'), ((2590, 2620), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""'], {}), "('mean')\n", (2612, 2620), True, 'import megengine.distributed as dist\n'), ((2631, 2653), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (2651, 2653), True, 'import megengine.autodiff as autodiff\n'), ((2847, 2889), 'os.path.join', 'os.path.join', (['args.log_dir', '"""train.events"""'], {}), "(args.log_dir, 'train.events')\n", (2859, 2889), False, 'import os\n'), ((3107, 3148), 'os.path.join', 'os.path.join', (['args.log_dir', '"""worklog.txt"""'], {}), "(args.log_dir, 'worklog.txt')\n", (3119, 3148), False, 'import os\n'), ((3976, 4000), 'os.path.exists', 'os.path.exists', (['chk_path'], {}), '(chk_path)\n', (3990, 4000), False, 'import os\n'), ((6055, 6074), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6072, 6074), False, 'import time\n'), ((7149, 7168), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7166, 7168), False, 'import time\n'), ((9089, 9108), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9106, 9108), False, 'import time\n'), ((10351, 10378), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (10371, 10378), True, 'import megengine as mge\n'), ((9844, 9885), 'os.path.join', 'os.path.join', (['log_model_dir', '"""latest.mge"""'], {}), "(log_model_dir, 'latest.mge')\n", (9856, 9885), False, 'import os\n'), ((9975, 10030), 'os.path.join', 'os.path.join', (['log_model_dir', "('epoch-%d.mge' % epoch_idx)"], {}), "(log_model_dir, 'epoch-%d.mge' % epoch_idx)\n", (9987, 10030), False, 'import os\n'), ((10112, 10141), 'megengine.save', 'mge.save', (['ckp_data', 'save_path'], {}), '(ckp_data, save_path)\n', (10120, 10141), True, 'import 
megengine as mge\n'), ((6133, 6179), 'megengine.amp.autocast', 'mge.amp.autocast', ([], {'enabled': 'args.mixed_precision'}), '(enabled=args.mixed_precision)\n', (6149, 6179), True, 'import megengine as mge\n'), ((6243, 6259), 'megengine.tensor', 'mge.tensor', (['left'], {}), '(left)\n', (6253, 6259), True, 'import megengine as mge\n'), ((6288, 6305), 'megengine.tensor', 'mge.tensor', (['right'], {}), '(right)\n', (6298, 6305), True, 'import megengine as mge\n'), ((6336, 6355), 'megengine.tensor', 'mge.tensor', (['gt_disp'], {}), '(gt_disp)\n', (6346, 6355), True, 'import megengine as mge\n'), ((6389, 6411), 'megengine.tensor', 'mge.tensor', (['valid_mask'], {}), '(valid_mask)\n', (6399, 6411), True, 'import megengine as mge\n'), ((6442, 6472), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_disp'], {'axis': '(1)'}), '(gt_disp, axis=1)\n', (6455, 6472), True, 'import megengine.functional as F\n'), ((6503, 6543), 'megengine.functional.concat', 'F.concat', (['[gt_disp, gt_disp * 0]'], {'axis': '(1)'}), '([gt_disp, gt_disp * 0], axis=1)\n', (6511, 6543), True, 'import megengine.functional as F\n'), ((1939, 1967), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid'], {'axis': '(1)'}), '(valid, axis=1)\n', (1952, 1967), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
from typing import Iterable, List, Optional, Union
import numpy as np
import megengine._internal as mgb
from megengine._internal import CompGraph, CompNode
from ..core import zeros
from ..core.graph import _use_default_if_none
from ..core.tensor import Tensor, wrap_io_tensor
from .elemwise import ceil
from .utils import _decide_comp_node_and_comp_graph
@wrap_io_tensor
def broadcast_to(inp: Tensor, shape: Union[int, Iterable[int]]) -> Tensor:
    """
    Broadcast a tensor to ``shape``.

    :param inp: the input tensor
    :param shape: the target shape; a bare ``int`` is treated as ``(int,)``
    :return: the broadcast tensor

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        data = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
        out = F.broadcast_to(data, (4, 2, 3))
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[[0. 1. 2.]
          [3. 4. 5.]]

         [[0. 1. 2.]
          [3. 4. 5.]]

         [[0. 1. 2.]
          [3. 4. 5.]]

         [[0. 1. 2.]
          [3. 4. 5.]]]

    """
    target_shape = (shape,) if isinstance(shape, int) else shape
    return mgb.opr.broadcast(inp, target_shape)
def _get_idx(index, axis):
    """Build the per-dimension flattened index tuple for advanced indexing.

    For every dimension except ``axis`` the entry enumerates that dimension's
    own coordinates (broadcast to ``index.shape`` and flattened); for ``axis``
    the entry is the flattened ``index`` values themselves.
    """
    ndim = len(index.imm_shape)
    comp_node, comp_graph = _decide_comp_node_and_comp_graph(index)

    def dim_coords(dim):
        # coordinates 0..size-1 along `dim`, broadcast to index.shape, flattened
        size = index.axis_shape(dim)
        unit_shape = [1] * ndim
        unit_shape[dim] = size
        coords = mgb.opr.linspace(
            0,
            size - 1,
            size,
            comp_node=comp_node,
            comp_graph=comp_graph,
        )
        return (
            coords.reshape(*unit_shape)
            .broadcast(index.shape)
            .reshape(-1)
            .astype(np.int32)
        )

    return tuple(
        index.reshape(-1) if dim == axis else dim_coords(dim)
        for dim in range(ndim)
    )
@wrap_io_tensor
def gather(inp: Tensor, axis: int, index: Tensor) -> Tensor:
    r"""
    Gather data from :attr:`inp` on :attr:`axis` using :attr:`index`.

    For a 3-D tensor, the output is specified by::

        out[i][j][k] = inp[index[i][j][k]][j][k] # if axis == 0
        out[i][j][k] = inp[i][index[i][j][k]][k] # if axis == 1
        out[i][j][k] = inp[i][j][index[i][j][k]] # if axis == 2

    if :attr:`inp` is an n-dimensional tensor with size
    :math:`(x_0,x_1,...,x_{i-1},x_i,x_{i+1},...,x_{n-1})` and axis=i,
    then :attr:`index` must be an n-dimensional tensor with size
    :math:`(x_0,x_1,...,x_{i-1},y,x_{i+1},...,x_{n-1})` where :math:`y\ge 1` and
    output will have the same size as :attr:`index`.

    :param inp: the source tensor
    :param axis: the axis along which to index
    :param index: the indices of elements to gather
    :raises ValueError: if ``index`` has a different rank than ``inp``, if
        ``axis`` is out of range, or if the shapes differ on any non-axis dim

    Examples:

    .. testcode::

        import megengine.functional as F
        from megengine.core import tensor

        inp = tensor([
            [1,2], [3,4], [5,6],
        ])
        index = tensor([[0,2], [1,0]])
        oup = F.gather(inp, 0, index)
        print(oup.numpy())

    Outputs:

    .. testoutput::

        [[1 6]
         [3 2]]

    """
    input_shape = inp.imm_shape
    index_shape = index.imm_shape
    input_dims = len(input_shape)
    index_dims = len(index_shape)
    if input_dims != index_dims:
        raise ValueError(
            "The index tensor must have same dimensions as input tensor, "
            "But the input dims:{}, the index dims:{}".format(input_dims, index_dims)
        )

    if axis < 0 or axis >= input_dims:
        # fixed wording: was "is output of bounds, should in range [0 {})"
        raise ValueError(
            "Index axis {} is out of bounds, should be in range [0, {})".format(
                axis, input_dims
            )
        )

    # every dimension except the gather axis must match between inp and index
    for i in range(input_dims):
        if i != axis and input_shape[i] != index_shape[i]:
            raise ValueError(
                "The input {} and index {} must have the same size apart from axis {}".format(
                    input_shape, index_shape, axis
                )
            )

    idx = _get_idx(index, axis)
    return mgb.opr.advanced_indexing(inp)[idx].reshape(
        index.shape
    )  # pylint: disable=no-member
@wrap_io_tensor
def concat(
    inps: Iterable[Tensor],
    axis: int = 0,
    device: Optional[CompNode] = None,
    comp_graph: Optional[CompGraph] = None,
) -> Tensor:
    r"""
    Concatenate a sequence of tensors along ``axis``.
    :param inps: the input tensors to concatenate
    :param axis: the dimension over which the tensors are concatenated. Default: 0
    :param device: the comp node the output lives on. Default: None
    :param comp_graph: the graph the output belongs to. Default: None
    :return: the concatenated tensor
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape((2, 3)))
        data2 = tensor(np.arange(6, 12, dtype=np.float32).reshape((2, 3)))
        out = F.concat([data1, data2])
        print(out.numpy())
    Outputs:
    .. testoutput::
        [[ 0. 1. 2.]
        [ 3. 4. 5.]
        [ 6. 7. 8.]
        [ 9. 10. 11.]]
    """
    # Writing into a caller-provided output buffer is not supported.
    tensors = list(inps)
    return mgb.opr.concat(*tensors, axis=axis, comp_node=device, comp_graph=comp_graph)
@wrap_io_tensor
def scatter(inp: Tensor, axis: int, index: Tensor, source: Tensor) -> Tensor:
    r"""
    Writes all values from the tensor :attr:`source` into :attr:`inp` at the indices specified in the :attr:`index` tensor.
    For each value in :attr:`source`, its output index is specified by its index
    in :attr:`source` for ``axis != dimension`` and by the corresponding value in
    :attr:`index` for ``axis = dimension``.
    For a 3-D tensor, :attr:`inp` is updated as::
        inp[index[i][j][k]][j][k] = source[i][j][k] # if axis == 0
        inp[i][index[i][j][k]][k] = source[i][j][k] # if axis == 1
        inp[i][j][index[i][j][k]] = source[i][j][k] # if axis == 2
    :attr:`inp`, :attr:`index` and :attr:`source` should have same number of dimensions.
    It is also required that ``source.shape(d) <= inp.shape(d)`` and ``index.shape(d) == source.shape(d)``
    for all dimensions ``d``.
    Moreover, the values of :attr:`index` must be between ``0`` and ``inp.shape(axis) - 1`` inclusive.
    .. note::
        Please notice that, due to performance issues, the result is uncertain on the GPU device
        if scatter difference positions from source to the same destination position
        regard to index tensor.
        Show the case using the following examples, the oup[0][2] is maybe
        from source[0][2] which value is 0.2256 or source[1][2] which value is 0.5339
        if set the index[1][2] from 1 to 0.
    :param inp: the inp tensor which to be scattered
    :param axis: the axis along which to index
    :param index: the indices of elements to scatter
    :param source: the source element(s) to scatter
    :raises ValueError: if the ranks differ, ``axis`` is out of range, or the
        shape constraints above are violated
    Examples:
    .. testcode::
        import numpy as np
        import megengine.functional as F
        from megengine.core import tensor
        inp = tensor(np.zeros(shape=(3,5),dtype=np.float32))
        source = tensor([[0.9935,0.9465,0.2256,0.8926,0.4396],[0.7723,0.0718,0.5939,0.357,0.4576]])
        index = tensor([[0,2,0,2,1],[2,0,1,1,2]])
        oup = F.scatter(inp, 0, index,source)
        print(oup.numpy())
    Outputs:
    .. testoutput::
        [[0.9935 0.0718 0.2256 0. 0. ]
        [0. 0. 0.5939 0.357 0.4396]
        [0.7723 0.9465 0. 0.8926 0.4576]]
    """
    input_shape = inp.imm_shape
    index_shape = index.imm_shape
    source_shape = source.imm_shape
    input_dims = len(input_shape)
    index_dims = len(index_shape)
    source_dims = len(source_shape)
    if input_dims != index_dims or input_dims != source_dims:
        raise ValueError("The input, source and index tensor must have same dimensions")
    # Fixed garbled message: was "is output of bounds, should in range".
    if axis < 0 or axis >= input_dims:
        raise ValueError(
            "Index axis {} is out of bounds, should be in range [0, {})".format(
                axis, input_dims
            )
        )
    # source must fit inside inp along every dimension.
    for i in range(source_dims):
        if source_shape[i] > input_shape[i]:
            raise ValueError(
                "The each shape size for source {} must be less than or equal to input {} ".format(
                    source_shape, input_shape
                )
            )
    # index and source must agree exactly, element for element.
    for i in range(index_dims):
        if index_shape[i] != source_shape[i]:
            raise ValueError(
                "The each shape size for index {} must be equal to source {} ".format(
                    index_shape, source_shape
                )
            )
    # Off-axis, index may not exceed the destination's extent.
    for i in range(index_dims):
        if i != axis and index_shape[i] > input_shape[i]:
            raise ValueError(
                "The index {} must be less than or equal to input {} size apart from axis {}".format(
                    index_shape, input_shape, axis
                )
            )
    idx = _get_idx(index, axis)
    return mgb.opr.set_advanced_indexing(inp, source.flatten())[idx]
@wrap_io_tensor
def where(mask: Tensor, x: Tensor, y: Tensor) -> Tensor:
    r"""
    Select elements either from Tensor x or Tensor y, according to mask.
    .. math::
        \textrm{out}_i = x_i \textrm{ if } \textrm{mask}_i \textrm{ is True else } y_i
    :param mask: a mask used for choosing x or y
    :param x: the first choice
    :param y: the second choice
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
        x = tensor(np.array([[1, np.inf], [np.nan, 4]],
            dtype=np.float32))
        y = tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
        out = F.where(mask, x, y)
        print(out.numpy())
    Outputs:
    .. testoutput::
        [[1. 6.]
        [7. 4.]]
    """
    # cond_take yields both the selected values and their flat positions:
    # elements of x where mask == 1, elements of y where mask == 0.
    v0, index0 = mgb.opr.cond_take(
        x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=1
    )
    v1, index1 = mgb.opr.cond_take(
        y, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=0
    )
    # The two disjoint value/index streams together cover every position, so
    # scattering them into a flat buffer reconstructs the full result.
    out = x.flatten()
    index = mgb.opr.concat(index0, index1, axis=0)
    v = mgb.opr.concat(v0, v1, axis=0)
    out = mgb.opr.set_advanced_indexing(out, v)[index]
    out = out.reshape(x.shape)
    return out
@wrap_io_tensor
def cond_take(mask: Tensor, x: Tensor, val=1) -> Tensor:
    r"""
    Take the elements of ``x`` whose corresponding ``mask`` entry equals ``val``.
    Produces two 1-dimensional outputs: the taken elements and the flat indices
    they came from. High-dimensional input is flattened first.
    :param mask: the condition tensor; must have the same shape as ``x``
    :param x: the tensor elements are taken from
    :param val: the value each mask entry is compared against. Default: 1
    Examples:
    .. testcode::
        from megengine import tensor
        import megengine.functional as F
        mask = tensor(np.array([[1, 0], [0, 1]], dtype=np.int32))
        x = tensor(np.array([[1, np.inf], [np.nan, 4]],
            dtype=np.float32))
        v, index = F.cond_take(mask, x, 1)
        print(v, index)
    Outputs:
    .. testoutput::
        Tensor([1. 4.]) Tensor([0 3], dtype=int32)
    """
    taken, positions = mgb.opr.cond_take(
        x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=val
    )
    return taken, positions
def shapeof(x: Tensor, axis=None):
    r"""
    Return the shape of the input tensor; ``axis`` is forwarded to
    ``Tensor.shapeof``.
    """
    return x.shapeof(axis=axis)
@wrap_io_tensor
def dimshuffle(inp: Tensor, pattern: Iterable[int]) -> Tensor:
    r"""
    Permute (and optionally broadcast) the dimensions of a tensor.
    :param inp: the input tensor
    :param pattern: a list of integers including 0, 1, ... , ``ndim``-1, and any number of ``'x'`` char in dimensions where this tensor should be broadcasted. For examples:
    * (``'x'``) -> make a 0d (scalar) into a 1d vector
    * (0, 1) -> identity for 2d vectors
    * (1, 0) -> inverts the first and second dimensions
    * (``'x'``, 0) -> make a row out of a 1d vector (N to 1xN)
    * (0, ``'x'``) -> make a column out of a 1d vector (N to Nx1)
    * (2, 0, 1) -> AxBxC to CxAxB
    * (0, ``'x'``, 1) -> AxB to Ax1xB
    * (1, ``'x'``, 0) -> AxB to Bx1xA
    * (1,) -> This remove dimensions 0. It must be a broadcastable dimension (1xA to A)
    :return: the permuted tensor
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.array([[1, 1], [0, 0]], dtype=np.int32))
        out = F.dimshuffle(x, (1, 0))
        print(out.numpy())
    Outputs:
    .. testoutput::
        [[1 0]
        [1 0]]
    """
    shuffled = mgb.opr.dimshuffle(inp, pattern)
    return shuffled
@wrap_io_tensor
def reshape(inp: Tensor, target_shape: Iterable[int]) -> Tensor:
    r"""
    Reshape a tensor to ``target_shape``; the total number of logical elements
    must remain unchanged.
    :param inp: the input tensor
    :param target_shape: target shape, the components would be concatenated to form the
        target shape, and it can contain an element of -1 representing unspec_axis.
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(12, dtype=np.int32))
        out = F.reshape(x, (3, 2, 2))
        print(out.numpy())
    Outputs:
    .. testoutput::
        [[[ 0 1]
        [ 2 3]]
        [[ 4 5]
        [ 6 7]]
        [[ 8 9]
        [10 11]]]
    """
    reshaped = mgb.opr.reshape(inp, target_shape)
    return reshaped
def transpose(inp: Tensor, pattern: Iterable[int]) -> Tensor:
    r"""Alias of :func:`dimshuffle`; see that function for details."""
    return dimshuffle(inp, pattern)
@wrap_io_tensor
def add_axis(inp: Tensor, axis: int) -> Tensor:
    r"""
    Insert a new dimension of size 1 before the given ``axis``.
    :param inp: the input tensor
    :param axis: the position at which the new axis is inserted
    :return: the expanded tensor
    :raises ValueError: if ``axis`` is not an ``int``
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor([1, 2])
        out = F.add_axis(x, 0)
        print(out.shape)
    Outputs:
    .. testoutput::
        (1, 2)
    """
    # Reject non-int axes up front; mgb would otherwise fail less clearly.
    if not isinstance(axis, int):
        raise ValueError("axis must be int, but got type:{}".format(type(axis)))
    return mgb.opr.add_axis(inp, axis)
@wrap_io_tensor
def remove_axis(inp: Tensor, axis: int) -> Tensor:
    r"""
    Remove a dimension of size 1 at position ``axis``.
    :param inp: the input tensor
    :param axis: the position of the axis to be removed
    :return: the squeezed tensor
    :raises ValueError: if ``axis`` is not an ``int``
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.array([1, 2], dtype=np.int32).reshape(1, 1, 2, 1))
        out = F.remove_axis(x, 3)
        print(out.shape)
    Outputs:
    .. testoutput::
        (1, 1, 2)
    """
    # Reject non-int axes up front; mgb would otherwise fail less clearly.
    if not isinstance(axis, int):
        raise ValueError("axis must be int, but got type:{}".format(type(axis)))
    return mgb.opr.remove_axis(inp, axis)
def linspace(
    start: Union[int, float, Tensor],
    stop: Union[int, float, Tensor],
    num: Union[int, Tensor],
    dtype=np.float32,
    device: Optional[CompNode] = None,
    comp_graph: Optional[CompGraph] = None,
) -> Tensor:
    r"""
    Return ``num`` evenly spaced values over the closed interval
    ``[start, stop]``.
    :param start: first value of the sequence, should be a scalar
    :param stop: last value of the sequence, should be a scalar
    :param num: number of values to generate
    :param dtype: result data type; only ``np.float32`` is supported
    :return: the generated tensor
    :raises ValueError: if ``dtype`` is not ``np.float32``
    Examples:
    .. testcode::
        import numpy as np
        import megengine.functional as F
        a = F.linspace(3,10,5)
        print(a.numpy())
    .. testoutput::
        [ 3. 4.75 6.5 8.25 10. ]
    """
    if dtype is not np.float32:
        raise ValueError("linspace is only implemented for float32")
    device, comp_graph = _use_default_if_none(device, comp_graph)
    out = Tensor(
        mgb.opr.linspace(start, stop, num, comp_node=device, comp_graph=comp_graph)
    )
    return out.astype(dtype)
def arange(
    start: Union[int, float, Tensor],
    end: Union[int, float, Tensor],
    step: Union[int, float, Tensor] = 1,
    dtype=np.float32,
    device: Optional[CompNode] = None,
    comp_graph: Optional[CompGraph] = None,
) -> Tensor:
    r"""
    Return values starting at ``start`` and advancing by ``step`` up to
    (but not including) ``end``.
    :param start: starting value of the sequence, should be a scalar
    :param end: end of the interval (excluded), should be a scalar
    :param step: the gap between each pair of adjacent values. Default 1
    :param dtype: result data type; only ``np.float32`` is supported
    :return: the generated tensor
    :raises ValueError: if ``dtype`` is not ``np.float32``
    Examples:
    .. testcode::
        import numpy as np
        import megengine.functional as F
        a = F.arange(1, 5, 1)
        print(a.numpy())
    .. testoutput::
        [1. 2. 3. 4.]
    """
    if dtype is not np.float32:
        raise ValueError("arange is only implemented for float32")
    # Delegate to linspace: derive the element count and the last value.
    count = ceil((end - start) / step)
    last = start + step * (count - 1)
    return linspace(start, last, count, device=device, comp_graph=comp_graph)
def zeros_like(inp: Tensor) -> Tensor:
    r"""
    Return a tensor of zeros with the same shape and dtype as ``inp``.
    :param inp: the template tensor
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        inp = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
        out = F.zeros_like(inp)
        print(out.numpy())
    .. testoutput::
        [[0 0 0]
        [0 0 0]]
    """
    out = zeros(inp.shapeof())
    return out.astype(inp.dtype)
|
[
"megengine._internal.opr.cond_take",
"megengine._internal.opr.dimshuffle",
"megengine._internal.opr.add_axis",
"megengine._internal.opr.remove_axis",
"megengine._internal.opr.set_advanced_indexing",
"megengine._internal.opr.reshape",
"megengine._internal.opr.broadcast",
"megengine._internal.opr.linspace",
"megengine._internal.opr.advanced_indexing",
"megengine._internal.opr.concat"
] |
[((1563, 1592), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['inp', 'shape'], {}), '(inp, shape)\n', (1580, 1592), True, 'import megengine._internal as mgb\n'), ((10424, 10499), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['x', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': '(1)'}), '(x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=1)\n', (10441, 10499), True, 'import megengine._internal as mgb\n'), ((10531, 10606), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['y', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': '(0)'}), '(y, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=0)\n', (10548, 10606), True, 'import megengine._internal as mgb\n'), ((10655, 10693), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['index0', 'index1'], {'axis': '(0)'}), '(index0, index1, axis=0)\n', (10669, 10693), True, 'import megengine._internal as mgb\n'), ((10702, 10732), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['v0', 'v1'], {'axis': '(0)'}), '(v0, v1, axis=0)\n', (10716, 10732), True, 'import megengine._internal as mgb\n'), ((11809, 11886), 'megengine._internal.opr.cond_take', 'mgb.opr.cond_take', (['x', 'mask'], {'mode': 'mgb.opr_param_defs.CondTake.Mode.EQ', 'val': 'val'}), '(x, mask, mode=mgb.opr_param_defs.CondTake.Mode.EQ, val=val)\n', (11826, 11886), True, 'import megengine._internal as mgb\n'), ((13288, 13320), 'megengine._internal.opr.dimshuffle', 'mgb.opr.dimshuffle', (['inp', 'pattern'], {}), '(inp, pattern)\n', (13306, 13320), True, 'import megengine._internal as mgb\n'), ((14143, 14177), 'megengine._internal.opr.reshape', 'mgb.opr.reshape', (['inp', 'target_shape'], {}), '(inp, target_shape)\n', (14158, 14177), True, 'import megengine._internal as mgb\n'), ((14944, 14971), 'megengine._internal.opr.add_axis', 'mgb.opr.add_axis', (['inp', 'axis'], {}), '(inp, axis)\n', (14960, 14971), True, 'import megengine._internal as mgb\n'), ((15650, 15680), 
'megengine._internal.opr.remove_axis', 'mgb.opr.remove_axis', (['inp', 'axis'], {}), '(inp, axis)\n', (15669, 15680), True, 'import megengine._internal as mgb\n'), ((10743, 10780), 'megengine._internal.opr.set_advanced_indexing', 'mgb.opr.set_advanced_indexing', (['out', 'v'], {}), '(out, v)\n', (10772, 10780), True, 'import megengine._internal as mgb\n'), ((16659, 16734), 'megengine._internal.opr.linspace', 'mgb.opr.linspace', (['start', 'stop', 'num'], {'comp_node': 'device', 'comp_graph': 'comp_graph'}), '(start, stop, num, comp_node=device, comp_graph=comp_graph)\n', (16675, 16734), True, 'import megengine._internal as mgb\n'), ((4531, 4561), 'megengine._internal.opr.advanced_indexing', 'mgb.opr.advanced_indexing', (['inp'], {}), '(inp)\n', (4556, 4561), True, 'import megengine._internal as mgb\n')]
|
from collections import OrderedDict
import numpy as np
import megengine.functional as F
import megengine.module as M
from megengine import Tensor
from megengine.core._imperative_rt.core2 import apply
from megengine.core.ops import builtin
from megengine.module import Module
from megengine.traced_module import TracedModule, enable_expr_checker, trace_module
from megengine.traced_module.expr import Apply, CallFunction, Constant
class MyModule1(M.Module):
    def forward(self, x):
        # Copy x into a fresh Tensor so the in-place add leaves x untouched.
        copied = Tensor(x)
        copied += 1
        x = x + 2
        return x, copied
class MyModule2(M.Module):
    def forward(self, x):
        # Build a new tensor embedding x between two constants, then bump it.
        packed = Tensor([1, x, 1])
        packed += 1
        x = x + 2
        return x, packed
class MyModule3(M.Module):
    def __init__(self):
        super().__init__()
        # NOTE: intentionally shadows the Module.modules attribute name with a
        # plain list mixing child modules and an OrderedDict of children.
        self.modules = [
            M.Elemwise("ADD"),
            M.Elemwise("ADD"),
            OrderedDict([("a", M.Elemwise("ADD")), ("b", M.Elemwise("ADD"))]),
            M.Elemwise("RELU"),
            M.Elemwise("RELU"),
        ]

    def forward(self, a, b):
        lhs = self.modules[0](a, b)
        rhs = self.modules[1](a, b)
        assert list(self.modules[2].keys()) == ["a", "b"]
        for child in self.modules[2].values():
            rhs = child(lhs, rhs)
        for child in self.modules[3:]:
            rhs = child(rhs)
        return rhs
class MyModule4(M.Module):
    def __init__(self):
        super().__init__()
        # Store the free function as an attribute so tracing records it as a
        # function call rather than a module call.
        self.add = F.add

    def forward(self, x, y):
        out = self.add(x, y)
        return out
def test_trace_module():
    # End-to-end checks that trace_module reproduces eager results and records
    # the expected module/expression structure.
    enable_expr_checker()
    x = Tensor(1)
    m1 = MyModule1()
    tm1 = trace_module(m1, x)
    m2 = MyModule2()
    tm2 = trace_module(m2, x)
    inp = Tensor(2)
    # Traced modules must agree with eager execution element-wise.
    gt = m1(inp)
    output = tm1(inp)
    for a, b in zip(output, gt):
        np.testing.assert_equal(a.numpy(), b.numpy())
    gt1 = m2(inp)
    output1 = tm2(inp)
    for a, b in zip(output1, gt1):
        np.testing.assert_equal(a.numpy(), b.numpy())
    a, b = Tensor(1), Tensor(2)
    m3 = MyModule3()
    gt = m3(a, b)
    tm3 = trace_module(m3, a, b)
    out = tm3(a, b)
    np.testing.assert_equal(out.numpy(), gt.numpy())
    # The list/OrderedDict children of MyModule3 are re-registered on the
    # traced module under string keys "0".."4"; the dict becomes a nested
    # TracedModule.
    assert isinstance(tm3.modules.__dict__["0"], M.Elemwise)
    assert isinstance(tm3.modules.__dict__["2"], TracedModule)
    assert isinstance(tm3.modules.__dict__["2"].a, M.Elemwise)
    assert isinstance(tm3.modules.__dict__["3"], M.Elemwise)
    m4 = MyModule4()
    # Tracing with positional, keyword, and all-keyword arguments must all
    # produce a module callable in every one of those styles.
    tm4 = trace_module(m4, a, b)
    np.testing.assert_equal(tm4(a, b).numpy(), 3)
    np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
    tm4 = trace_module(m4, a, y=b)
    np.testing.assert_equal(tm4(a, b).numpy(), 3)
    np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
    tm4 = trace_module(m4, x=a, y=b)
    np.testing.assert_equal(tm4(a, b).numpy(), 3)
    np.testing.assert_equal(tm4(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm4(x=a, y=b).numpy(), 3)
    # Re-tracing an already-traced module must behave the same way.
    tm5 = trace_module(tm4, a, b)
    np.testing.assert_equal(tm5(a, b).numpy(), 3)
    np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
    tm5 = trace_module(tm4, a, y=b)
    np.testing.assert_equal(tm5(a, b).numpy(), 3)
    np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
    tm5 = trace_module(tm4, x=a, y=b)
    np.testing.assert_equal(tm5(a, b).numpy(), 3)
    np.testing.assert_equal(tm5(a, y=b).numpy(), 3)
    np.testing.assert_equal(tm5(x=a, y=b).numpy(), 3)
    # F.add stored as an attribute traces to a single CallFunction expression.
    assert len(tm4.graph._exprs) == 1
    assert isinstance(tm4.graph._exprs[0], CallFunction)
    class MyModule5(Module):
        def __init__(self):
            super().__init__()
            self.m1 = tm4
        def forward(self, x, y):
            return self.m1(x, y)
    # A traced module nested inside a fresh module loses its top-level status.
    tm6 = trace_module(MyModule5(), a, b)
    assert tm6.m1.argspec is None
    assert tm6.m1._is_top is False
def test_trace_module_2():
    # Checks that low-level apply() calls and shape reads are captured as the
    # expected sequence of graph expressions.
    class Model(M.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x):
            out = x.shape
            out = apply(builtin.Elemwise(mode="ADD"), out, Tensor(1))
            return out
    traced_model = trace_module(Model(), Tensor(([1,])))
    # Expected expression order: shape read, constant capture, elemwise add.
    assert isinstance(traced_model.graph._exprs[0], Apply) and isinstance(
        traced_model.graph._exprs[0].opdef, builtin.GetVarShape
    )
    assert isinstance(traced_model.graph._exprs[1], Constant)
    assert isinstance(traced_model.graph._exprs[2], Apply) and isinstance(
        traced_model.graph._exprs[2].opdef, builtin.Elemwise
    )
    # shape of [1, 2] is (2,); 2 + 1 == 3.
    assert int(traced_model(Tensor([1, 2]))[0]) == 3
|
[
"megengine.traced_module.enable_expr_checker",
"megengine.Tensor",
"megengine.module.Elemwise",
"megengine.core.ops.builtin.Elemwise",
"megengine.traced_module.trace_module"
] |
[((1514, 1535), 'megengine.traced_module.enable_expr_checker', 'enable_expr_checker', ([], {}), '()\n', (1533, 1535), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1544, 1553), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (1550, 1553), False, 'from megengine import Tensor\n'), ((1585, 1604), 'megengine.traced_module.trace_module', 'trace_module', (['m1', 'x'], {}), '(m1, x)\n', (1597, 1604), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1637, 1656), 'megengine.traced_module.trace_module', 'trace_module', (['m2', 'x'], {}), '(m2, x)\n', (1649, 1656), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((1667, 1676), 'megengine.Tensor', 'Tensor', (['(2)'], {}), '(2)\n', (1673, 1676), False, 'from megengine import Tensor\n'), ((2017, 2039), 'megengine.traced_module.trace_module', 'trace_module', (['m3', 'a', 'b'], {}), '(m3, a, b)\n', (2029, 2039), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2393, 2415), 'megengine.traced_module.trace_module', 'trace_module', (['m4', 'a', 'b'], {}), '(m4, a, b)\n', (2405, 2415), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2583, 2607), 'megengine.traced_module.trace_module', 'trace_module', (['m4', 'a'], {'y': 'b'}), '(m4, a, y=b)\n', (2595, 2607), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2775, 2801), 'megengine.traced_module.trace_module', 'trace_module', (['m4'], {'x': 'a', 'y': 'b'}), '(m4, x=a, y=b)\n', (2787, 2801), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((2969, 2992), 'megengine.traced_module.trace_module', 'trace_module', (['tm4', 'a', 'b'], {}), '(tm4, a, b)\n', (2981, 2992), False, 'from megengine.traced_module import TracedModule, 
enable_expr_checker, trace_module\n'), ((3160, 3185), 'megengine.traced_module.trace_module', 'trace_module', (['tm4', 'a'], {'y': 'b'}), '(tm4, a, y=b)\n', (3172, 3185), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((3353, 3380), 'megengine.traced_module.trace_module', 'trace_module', (['tm4'], {'x': 'a', 'y': 'b'}), '(tm4, x=a, y=b)\n', (3365, 3380), False, 'from megengine.traced_module import TracedModule, enable_expr_checker, trace_module\n'), ((499, 508), 'megengine.Tensor', 'Tensor', (['x'], {}), '(x)\n', (505, 508), False, 'from megengine import Tensor\n'), ((629, 646), 'megengine.Tensor', 'Tensor', (['[1, x, 1]'], {}), '([1, x, 1])\n', (635, 646), False, 'from megengine import Tensor\n'), ((1947, 1956), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (1953, 1956), False, 'from megengine import Tensor\n'), ((1958, 1967), 'megengine.Tensor', 'Tensor', (['(2)'], {}), '(2)\n', (1964, 1967), False, 'from megengine import Tensor\n'), ((4234, 4245), 'megengine.Tensor', 'Tensor', (['[1]'], {}), '([1])\n', (4240, 4245), False, 'from megengine import Tensor\n'), ((817, 834), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (827, 834), True, 'import megengine.module as M\n'), ((848, 865), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (858, 865), True, 'import megengine.module as M\n'), ((958, 976), 'megengine.module.Elemwise', 'M.Elemwise', (['"""RELU"""'], {}), "('RELU')\n", (968, 976), True, 'import megengine.module as M\n'), ((990, 1008), 'megengine.module.Elemwise', 'M.Elemwise', (['"""RELU"""'], {}), "('RELU')\n", (1000, 1008), True, 'import megengine.module as M\n'), ((4123, 4151), 'megengine.core.ops.builtin.Elemwise', 'builtin.Elemwise', ([], {'mode': '"""ADD"""'}), "(mode='ADD')\n", (4139, 4151), False, 'from megengine.core.ops import builtin\n'), ((4158, 4167), 'megengine.Tensor', 'Tensor', (['(1)'], {}), '(1)\n', (4164, 4167), False, 'from 
megengine import Tensor\n'), ((4628, 4642), 'megengine.Tensor', 'Tensor', (['[1, 2]'], {}), '([1, 2])\n', (4634, 4642), False, 'from megengine import Tensor\n'), ((898, 915), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (908, 915), True, 'import megengine.module as M\n'), ((924, 941), 'megengine.module.Elemwise', 'M.Elemwise', (['"""ADD"""'], {}), "('ADD')\n", (934, 941), True, 'import megengine.module as M\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=too-many-lines
from typing import Optional, Tuple, Union
import megengine._internal as mgb
from megengine._internal import CompGraph, CompNode
from ..core import Tensor, wrap_io_tensor
from ..core.graph import _use_default_if_none
from ..jit import barrier, mark_impure
from ..random import uniform
from ..utils.types import _pair, _pair_nonzero
from .debug_param import get_conv_execution_strategy
from .tensor import concat
from .utils import _decide_comp_node_and_comp_graph
@wrap_io_tensor
def linear(inp: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor:
    """Applies a linear transformation ``inp @ weight.T + bias``.
    Refer to :class:`~.Linear` for more information.
    """
    # Collapse all leading dimensions so the matmul sees a 2-d input, then
    # restore them (with the output feature count) afterwards.
    in_shape = inp.shape
    flat = inp.reshape(-1, in_shape[-1])
    out = mgb.opr.matrix_mul(flat, weight, transposeB=True)
    out = out.reshape(in_shape[:-1], weight.shape[0])
    if bias is not None:
        out = out + bias
    return out
@wrap_io_tensor
def conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="CROSS_CORRELATION",
    compute_mode="DEFAULT",
) -> Tensor:
    """2D convolution operation.
    :param inp: the feature map of the convolution operation
    :param weight: the convolution kernel
    :param bias: the bias added to the result of convolution (if given)
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups to divide input and output channels into,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``.
    :type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
    :param conv_mode: supports 'CROSS_CORRELATION' or 'CONVOLUTION'. Default:
        'CROSS_CORRELATION'.
    :type compute_mode: string or
        :class:`mgb.opr_param_defs.Convolution.ComputeMode`
    :param compute_mode: when set to 'DEFAULT', no special requirements will be
        placed on the precision of intermediate results. When set to 'FLOAT32',
        Float32 would be used for accumulator and intermediate result, but only
        effective when input and output are of Float16 dtype.
    Refer to :class:`~.Conv2d` for more information.
    """
    pad_h, pad_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilate_h, dilate_w = _pair_nonzero(dilation)
    # Grouped convolution uses the GROUP sparse layout; plain conv is DENSE.
    sparse_mode = (
        mgb.opr_param_defs.Convolution.Sparse.DENSE
        if groups == 1
        else mgb.opr_param_defs.Convolution.Sparse.GROUP
    )
    out = mgb.opr.convolution(
        inp,
        weight,
        pad_h=pad_h,
        pad_w=pad_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        format="NCHW",
        strategy=get_conv_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_mode,
    )
    if bias is not None:
        out = out + bias
    return out
@wrap_io_tensor
def conv_transpose2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="CROSS_CORRELATION",
    compute_mode="DEFAULT",
) -> Tensor:
    """2D transposed convolution operation.
    :param inp: the feature map of the convolution operation
    :param weight: the convolution kernel
    :param bias: the bias added to the result of convolution (if given)
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups to divide input and output channels into,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
    :param conv_mode: supports 'CROSS_CORRELATION' or 'CONVOLUTION'. Default:
        'CROSS_CORRELATION'.
    :type compute_mode: string or
        :class:`mgb.opr_param_defs.Convolution.ComputeMode`
    :param compute_mode: when set to 'DEFAULT', no special requirements will be
        placed on the precision of intermediate results. When set to 'FLOAT32',
        Float32 would be used for accumulator and intermediate result, but only
        effective when input and output are of Float16 dtype.
    Refer to :class:`~.ConvTranspose2d` for more information.
    """
    pad_h, pad_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilate_h, dilate_w = _pair_nonzero(dilation)
    # Grouped deconvolution uses the GROUP sparse layout; plain is DENSE.
    sparse_mode = (
        mgb.opr_param_defs.Convolution.Sparse.DENSE
        if groups == 1
        else mgb.opr_param_defs.Convolution.Sparse.GROUP
    )
    out = mgb.opr.deconvolution(
        inp,
        weight,
        pad_h=pad_h,
        pad_w=pad_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        format="NCHW",
        strategy=get_conv_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_mode,
    )
    if bias is not None:
        out = out + bias
    return out
@wrap_io_tensor
def max_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
    """Applies a 2D max pooling over an input.
    :param inp: the input tensor
    :param kernel_size: the size of the pooling window
    :param stride: the stride of the window; when None it defaults to
        ``kernel_size``. Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    Refer to :class:`~.MaxPool2d` for more information.
    """
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride or kernel_size)
    pad_h, pad_w = _pair(padding)
    return mgb.opr.pooling(
        inp,
        mode=mgb.opr_param_defs.Pooling.Mode.MAX,
        format="NCHW",
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        window_h=window_h,
        window_w=window_w,
    )
@wrap_io_tensor
def avg_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
    """Applies a 2D average pooling over an input.
    :param inp: the input tensor
    :param kernel_size: the size of the pooling window
    :param stride: the stride of the window; when None it defaults to
        ``kernel_size``. Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    Refer to :class:`~.AvgPool2d` for more information.
    """
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride or kernel_size)
    pad_h, pad_w = _pair(padding)
    return mgb.opr.pooling(
        inp,
        mode=mgb.opr_param_defs.Pooling.Mode.AVERAGE,
        format="NCHW",
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        window_h=window_h,
        window_w=window_w,
    )
@wrap_io_tensor
def prelu(inp: Tensor, weight: Tensor) -> Tensor:
    r"""
    Element-wise PReLU: ``max(x, 0) + weight * min(x, 0)``.
    Refer to :class:`~.PReLU` for more information.
    """
    positive = mgb.opr.elemwise(inp, 0, mode="MAX")
    negative = mgb.opr.elemwise(inp, 0, mode="MIN")
    return positive + weight * negative
@wrap_io_tensor
def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
    r"""
    Element-wise leaky ReLU: ``max(x, 0) + negative_slope * min(x, 0)``.
    Refer to :class:`~.LeakyReLU` for more information.
    """
    positive = mgb.opr.elemwise(inp, 0, mode="MAX")
    negative = mgb.opr.elemwise(inp, 0, mode="MIN")
    return positive + negative_slope * negative
@wrap_io_tensor
def flatten(inp: Tensor, start_axis: int = 0, end_axis: int = -1) -> Tensor:
    r"""
    Reshapes the tensor by flattening the sub-tensor from dimension ``start_axis`` to dimension ``end_axis``.

    :param inp: The input tensor.
    :param start_axis: The start dimension that the sub-tensor to be flattened. Default: 0
    :param end_axis: The end dimension that the sub-tensor to be flattened. Default: -1

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp_shape = (2, 2, 3, 3)
        inp = tensor(
            np.arange(36, dtype=np.int32).reshape(inp_shape),
        )
        oup = F.flatten(inp, 2)
        print(inp.numpy().shape)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 2, 3, 3)
        (2, 2, 9)
    """
    # Keep every dimension before start_axis unchanged and collapse the
    # remainder into a single -1 ("infer this size") entry.
    target_shape = tuple(inp.shape[i] for i in range(start_axis)) + (-1,)
    if end_axis != -1:
        # NOTE(review): the dimensions after end_axis are appended as one
        # shape-slice element; presumably mgb's reshape flattens this nested
        # value into individual dimensions — confirm against mgb semantics.
        target_shape += (inp.shape[end_axis + 1 :],)
    return inp.reshape(*target_shape)
def _get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@wrap_io_tensor
def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
    r"""
    Applies the softmax function:

    .. math::
            \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}

    All elements along ``axis`` are rescaled so they lie in ``[0, 1]`` and
    sum to 1. See :class:`~megengine.module.activation.Softmax` for details.

    :param inp: the input tensor.
    :param axis: the axis along which softmax is applied. By default softmax
        is applied along the highest ranked axis.
    """
    if axis is None:
        axis = _get_softmax_axis(len(inp.imm_shape))
    # Subtract the (gradient-stopped) per-axis maximum for numerical stability.
    shifted = inp - mgb.opr.zero_grad(inp.max(axis=axis, keepdims=True))
    numerator = mgb.opr.elem.exp(shifted)
    return numerator / numerator.sum(axis=axis, keepdims=True)
@wrap_io_tensor
def batch_norm2d(
    inp: Tensor,
    running_mean: Tensor,
    running_var: Tensor,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    training: bool = False,
    momentum: float = 0.9,
    eps: float = 1e-5,
) -> Tensor:
    r"""Applies batch normalization to the input.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`
    :param training: a boolean value to indicate whether batch norm is performed
        in traning mode. Default: ``False``
    :param momentum: the value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
    """
    inp = mgb.opr.mark_no_broadcast_elemwise(inp)
    # Affine parameters are broadcast as (1, C, 1, 1) over the NCHW input.
    _channels = inp.imm_shape[1]
    _ndim = len(inp.imm_shape)
    _param_shape = (1, _channels) + (1,) * (_ndim - 2)
    assert _ndim == 4, "only 4D tensor supported"
    # Missing weight/bias default to the identity affine transform (1.0 / 0.0).
    if weight is not None:
        weight = weight.reshape(*_param_shape)
    else:
        weight = mgb.make_immutable(*_use_default_if_none(None, None), 1.0).broadcast(
            *_param_shape
        )
    if bias is not None:
        bias = bias.reshape(*_param_shape)
    else:
        bias = mgb.make_immutable(*_use_default_if_none(None, None), 0.0).broadcast(
            *_param_shape
        )
    FwdMode = mgb.opr_param_defs.BN.FwdMode
    fwdmode = FwdMode.TRAINING if training else FwdMode.INFERENCE
    # Running statistics are updated as new = momentum * old + (1 - momentum) * batch.
    avg_factor = 1 - momentum
    if running_mean is not None and running_var is not None:
        if training:
            # NOTE(review): `barrier`/`mark_impure` are defined elsewhere in this
            # module; presumably they serialize/flag the stat-updating op so it
            # is not optimized away — confirm before changing this path.
            inp = barrier(inp)
        # The op returns several outputs; the normalized tensor is the last one.
        output = mgb.opr.batch_norm(
            inp,
            weight,
            bias,
            running_mean,
            running_var,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]
        if training:
            mark_impure(output)
    else:
        # No running statistics supplied: normalize with batch statistics only.
        output = mgb.opr.batch_norm_no_statistic(
            inp,
            weight,
            bias,
            param_dim="DIM_1C11",
            fwd_mode=fwdmode,
            epsilon=eps,
            avg_factor=avg_factor,
        )[-1]
    return output
def one_hot(inp: Tensor, num_classes: int = -1) -> Tensor:
    r"""
    Performs one-hot encoding of the input tensor.

    :param inp: input tensor of indices
    :param num_classes: size of the last dimension of the output; when -1 it
        is inferred as ``inp.max() + 1``

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(inp)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[0 1 0 0]
         [0 0 1 0]
         [0 0 0 1]]
    """
    comp_node, comp_graph = _decide_comp_node_and_comp_graph(inp)
    if num_classes == -1:
        num_classes = inp.max() + 1
    # Start from an all-zeros (inp.shape, num_classes) tensor and scatter ones
    # at the positions selected by `inp` along the new trailing axis.
    zeros_symvar = mgb.make_immutable(
        value=0, comp_node=comp_node, comp_graph=comp_graph
    ).broadcast(inp.shapeof(), num_classes)
    ones_symvar = mgb.make_immutable(
        value=1, comp_node=comp_node, comp_graph=comp_graph
    ).broadcast(inp.shapeof(), 1)
    encoded = mgb.opr.indexing_set_one_hot(
        zeros_symvar, axis=len(inp.shapeof()), index=inp, value=ones_symvar
    )
    return Tensor(encoded)
@wrap_io_tensor
def warp_perspective(
    inp: Tensor,
    M: Tensor,
    dsize: Union[Tuple[int, int], int, Tensor],
    border_mode: str = "REPLICATE",
    border_val: float = 0.0,
    interp_mode: str = "LINEAR",
):
    r"""
    Applies a batched perspective transformation to 2D images.

    Each output pixel is sampled from the input at the location produced by
    the homogeneous transform ``M``:

    .. math::
            \text{output}(n, c, h, w) = \text{input} \left( n, c,
                \frac{M_{00}h + M_{01}w + M_{02}}{M_{20}h + M_{21}w + M_{22}},
                \frac{M_{10}h + M_{11}w + M_{12}}{M_{20}h + M_{21}w + M_{22}}
            \right)

    :param inp: input image
    :param M: (batch, 3, 3) transformation matrix
    :param dsize: (h, w) size of the output image
    :param border_mode: pixel extrapolation method. Default: ``"REPLICATE"``
    :param border_val: value used in case of a constant border. Default: ``0``
    :param interp_mode: interpolation method. Default: ``"LINEAR"``

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp_shape = (1, 1, 4, 4)
        inp = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
        M_shape = (1, 3, 3)
        # M defines a translation: dst(1, 1, h, w) = rst(1, 1, h+1, w+1)
        M = tensor(np.array([[1., 0., 1.],
                             [0., 1., 1.],
                             [0., 0., 1.]], dtype=np.float32).reshape(M_shape))
        out = F.warp_perspective(inp, M, (2, 2))
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[[[ 5.  6.]
           [ 9. 10.]]]]
    """
    warped = mgb.opr.warp_perspective(
        inp,
        M,
        dsize,
        bmode=border_mode,
        border_val=border_val,
        imode=interp_mode,
        format="NCHW",
    )
    return warped
@wrap_io_tensor
def eye(
    n: int,
    m: Optional[int] = None,
    *,
    dtype=None,
    device: Optional[CompNode] = None,
    comp_graph: Optional[CompGraph] = None
) -> Tensor:
    """
    Returns a 2-dimensional identity matrix.

    :param n: the number of rows
    :param m: the number of columns; defaults to ``n`` when None
    :param dtype: the data type, default to None
    :param device: compute node of the matrix, defaults to None
    :param comp_graph: compute graph of the matrix, defaults to None
    :return: the eye matrix

    Examples:

    .. testcode::

        import numpy as np
        import megengine.functional as F

        n, m = (4, 6)
        out = F.eye(n, m, dtype=np.float32)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[1. 0. 0. 0. 0. 0.]
         [0. 1. 0. 0. 0. 0.]
         [0. 0. 1. 0. 0. 0.]
         [0. 0. 0. 1. 0. 0.]]
    """
    device, comp_graph = _use_default_if_none(device, comp_graph)
    num_cols = n if m is None else m
    return mgb.opr.eye((n, num_cols), dtype=dtype, comp_node=device, comp_graph=comp_graph)
@wrap_io_tensor
def matrix_mul(inp1: Tensor, inp2: Tensor) -> Tensor:
    """
    Multiplies matrix ``inp1`` of shape (a, b) by matrix ``inp2`` of shape (b, c).

    :param inp1: the first matrix to be multiplied (a, b)
    :param inp2: the second matrix to be multiplied (b, c)
    :return: the (a, c) product tensor

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
        data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
        out = F.matrix_mul(data1, data2)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[10. 13.]
         [28. 40.]]
    """
    return mgb.opr.matrix_mul(inp1, inp2)
@wrap_io_tensor
def batched_matrix_mul(inp1: Tensor, inp2: Tensor) -> Tensor:
    """
    Multiplies, batch by batch, the matrices ``inp1`` (n, a, b) and ``inp2`` (n, b, c).

    :param inp1: the first batch of matrices to be multiplied (n, a, b)
    :param inp2: the second batch of matrices to be multiplied (n, b, c)
    :return: the output batch of products (n, a, c)

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        batch_size = 3
        data1 = tensor(
            np.arange(0, batch_size * 6, dtype=np.float32).reshape(batch_size, 2, 3))
        data2 = tensor(
            np.arange(0, batch_size * 12, dtype=np.float32).reshape(batch_size, 3, 4))
        out = F.batched_matrix_mul(data1, data2)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[[  20.   23.   26.   29.]
          [  56.   68.   80.   92.]]

         [[ 344.  365.  386.  407.]
          [ 488.  518.  548.  578.]]

         [[1100. 1139. 1178. 1217.]
          [1352. 1400. 1448. 1496.]]]
    """
    return mgb.opr.batched_matrix_mul(inp1, inp2)
@wrap_io_tensor
def interpolate(
    inp: Tensor,
    size: Optional[Union[int, Tuple[int, int]]] = None,
    scale_factor: Optional[Union[float, Tuple[float, float]]] = None,
    mode: str = "BILINEAR",
    align_corners: bool = None,
) -> Tensor:
    r"""
    Down/up samples the input tensor to either the given :attr:`size` or the given
    :attr:`scale_factor` (exactly one of the two must be provided).

    :param inp: input tensor; 4D (NCHW) for bilinear, 3D for linear mode
    :param size: size of the output tensor. Default: ``None``
    :param scale_factor: scaling factor of the output tensor. Default: ``None``
    :param mode: interpolation methods, acceptable values are:
        'bilinear'(default), 'linear', 'nearest' (todo), 'cubic' (todo), 'area' (todo)
    :param align_corners: whether the corner pixels of input and output are
        aligned; ``None`` is treated as ``False``. Default: ``None``
    :raises ValueError: on an unsupported mode, on a size/scale_factor
        combination that does not match the mode, or on a non-4D effective input.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        from megengine.test import assertTensorClose

        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
        out = F.interpolate(inp, [4, 4], align_corners=False)
        print(out.numpy())
        out2 = F.interpolate(inp, scale_factor=2.)
        assertTensorClose(out.numpy(), out2.numpy())

    Outputs:

    .. testoutput::

        [[[[1.   1.25 1.75 2.  ]
           [1.5  1.75 2.25 2.5 ]
           [2.5  2.75 3.25 3.5 ]
           [3.   3.25 3.75 4.  ]]]]
    """
    mode = mode.upper()
    if mode not in ["BILINEAR", "LINEAR"]:
        raise ValueError("interpolate only support bilinear mode")
    # BUG FIX: the original re-tested `mode not in ["BILINEAR", "LINEAR"]` here,
    # a condition made impossible by the raise above, so its align_corners
    # validation branch was dead code; only this default assignment could run.
    if align_corners is None:
        align_corners = False

    if mode == "LINEAR":
        # Treat 1D linear interpolation as a degenerate (W == 1) bilinear case.
        inp = mgb.opr.add_axis(inp, 3)
    if len(inp.imm_shape) != 4:
        raise ValueError("shape of input tensor must correspond to the operartion mode")

    if size is None:
        if scale_factor is None:
            raise ValueError("scale_factor must not be None when size is None")
        if isinstance(scale_factor, (float, int)):
            scale_factor = float(scale_factor)
            if mode == "LINEAR":
                scale_factor = (scale_factor, float(1))
            else:
                scale_factor = (scale_factor, scale_factor)
        else:
            if mode == "LINEAR":
                raise ValueError(
                    "under LINEAR mode, scale_factor can only be single value"
                )
        assert len(scale_factor) == 2, "shape of scale_factor must be equal to (2, )"
        assert isinstance(scale_factor[0], float) and isinstance(
            scale_factor[1], float
        ), "scale_factor must be float type"
        # Output size = floor(input size * scale), per spatial axis.
        dsize = tuple(
            mgb.opr.elemwise(inp.shape[i + 2] * scale_factor[i], mode="FLOOR")
            for i in range(2)
        )
        dsize = mgb.opr.concat([dsize[0], dsize[1]], axis=0)
    else:
        if scale_factor is not None:
            raise ValueError("scale_factor must be None when size is provided")
        if isinstance(size, int):
            size = (size, 1)
        else:
            if mode == "LINEAR":
                raise ValueError("under LINEAR mode, size can only be single value")
        dsize = size

    oh, ow = dsize[0], dsize[1]
    ih, iw = inp.shape[2], inp.shape[3]
    # Build the 3x3 scaling matrix fed to warp_perspective; the two branches
    # only differ in how sample coordinates map between input and output grids.
    if align_corners:
        hscale = (ih - 1.0) / (oh - 1.0)
        wscale = 1.0 * iw / ow
        if mode != "LINEAR":
            wscale = (iw - 1.0) / (ow - 1.0)
        row0 = mgb.opr.concat([wscale, [0, 0]], axis=0).reshape(1, 3)
        row1 = mgb.opr.concat([[0], hscale, [0]], axis=0).reshape(1, 3)
    else:
        hscale = 1.0 * ih / oh
        wscale = 1.0 * iw / ow
        # The 0.5 * scale - 0.5 offset keeps pixel centers aligned when the
        # corner pixels are not pinned.
        row0 = mgb.opr.concat([wscale, [0], 0.5 * wscale - 0.5], axis=0).reshape(1, 3)
        row1 = mgb.opr.concat([[0], hscale, 0.5 * hscale - 0.5], axis=0).reshape(1, 3)
    weight = mgb.opr.concat([row0, row1, [[0, 0, 1]]], axis=0).reshape(1, 3, 3)
    weight = mgb.opr.broadcast(weight, (inp.shape[0], 3, 3))

    ret = mgb.opr.warp_perspective(inp, weight, dsize, imode="LINEAR", format="NCHW")
    if mode == "LINEAR":
        # Drop the synthetic W axis added above.
        ret = mgb.opr.reshape(ret, ret.shape[0:3])
    return ret
@wrap_io_tensor
def dropout(inp: Tensor, drop_prob: float, rescale: bool = True) -> Tensor:
    """
    Randomly zeroes elements of the input with probability ``drop_prob``,
    optionally rescaling the survivors.

    :param inp: the input tensor
    :param drop_prob: probability that a single element is set to zero
    :param rescale: if True (default), scale the output by ``1 / (1 - drop_prob)``
        so dropout can be replaced by an :class:`~.Identity` during inference
    :return: the output tensor

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.functional as F
        from megengine import tensor

        data = tensor(np.ones(10, dtype=np.float32))
        out = F.dropout(data, 1./3.)
        print(out.numpy())

    Outputs:

    .. testoutput::
        :options: +SKIP

        [1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
    """
    assert 0 <= drop_prob < 1
    # Keep an element whenever its uniform sample exceeds drop_prob.
    keep_mask = uniform(inp.shape) > drop_prob
    inp *= keep_mask.astype(inp.dtype)
    if rescale:
        inp *= 1 / (1 - drop_prob)
    return inp
@wrap_io_tensor
def identity(inp: Tensor) -> Tensor:
    """Returns the input tensor unchanged through an identity op.

    :param inp: the input tensor
    """
    return mgb.opr.identity(inp)
@wrap_io_tensor
def embedding(
    input: Tensor,
    weight: Tensor,
    padding_idx: Optional[int] = None,
    max_norm: Optional[float] = None,
    norm_type: Optional[float] = None,
):
    """
    Looks up rows of the ``weight`` table by the indices in ``input``.

    :param input: the tensor with indices.
    :param weight: the learnable embedding table.
    :param padding_idx: must be None; not supported yet.
    :param max_norm: must be None; not supported yet.
    :param norm_type: must be None; not supported yet.

    Refer to :class:`~.Embedding` for more information.
    """
    if padding_idx is not None:
        raise ValueError("Not support padding_idx Now!")
    if max_norm is not None or norm_type is not None:
        raise ValueError("Not support weight normlization Now!")
    # Gather rows for the flattened indices, then restore the index shape
    # with the embedding dimension appended.
    flat_rows = mgb.opr.advanced_indexing(weight)[input.reshape(-1), :]
    return flat_rows.reshape(input.shape, weight.shape[-1])
@wrap_io_tensor
def roi_pooling(
    input: Tensor,
    rois: Tensor,
    output_shape: Union[int, tuple, list],
    mode: str = "max",
    scale: float = 1.0,
) -> Tensor:
    """
    Applies RoI pooling on the input feature map.

    :param input: (N, C, H, W) input feature tensor
    :param rois: (K, 5) boxes; column 0 is the index into N, the rest are xyxy
    :param output_shape: (height, width) of each output RoI feature
    :param mode: "max" or "average" pooling, like max/average pooling. Default: ``"max"``
    :param scale: factor applied to the input boxes. Default: 1.0
    :return: (K, C, output_shape[0], output_shape[1]) RoI features
    """
    assert mode in ["max", "average"], "only max/average mode is supported"
    pooled_shape = (
        (output_shape, output_shape) if isinstance(output_shape, int) else output_shape
    )
    return mgb.opr.roi_pooling(
        input, rois, pooled_shape, mode=mode.upper(), scale=scale
    )
@wrap_io_tensor
def roi_align(
    input: Tensor,
    rois: Tensor,
    output_shape: Union[int, tuple, list],
    mode: str = "average",
    spatial_scale: float = 1.0,
    sample_points: Union[int, tuple, list] = 2,
    aligned: bool = True,
) -> Tensor:
    """
    Applies RoI align on the input feature map.

    :param input: (N, C, H, W) input feature tensor
    :param rois: (N, 5) boxes; column 0 is the index into N, the rest are xyxy
    :param output_shape: (height, width) of each output RoI feature.
    :param mode: "max" or "average" pooling of sampled points. Default: ``"average"``
    :param spatial_scale: factor applied to the input boxes. Default: 1.0
    :param sample_points: number of input samples per output sample;
        0 samples densely. Default: 2
    :param aligned: when True, scale the RoI appropriately and shift it
        by -0.5 to align feature and image coordinates. Default: True
    """
    assert mode in ["max", "average"], "only max/average mode is supported"
    if isinstance(output_shape, int):
        output_shape = (output_shape, output_shape)
    if isinstance(sample_points, int):
        sample_points = (sample_points, sample_points)
    pooled_height, pooled_width = output_shape
    sample_height, sample_width = sample_points
    offset = 0.5 if aligned else 0.0
    return mgb.opr.roi_align(
        input,
        rois,
        mode=mode.upper(),
        spatial_scale=spatial_scale,
        offset=offset,
        pooled_height=pooled_height,
        pooled_width=pooled_width,
        sample_height=sample_height,
        sample_width=sample_width,
    )
@wrap_io_tensor
def assert_equal(
    get: Tensor, expect: Tensor, max_err: float = 1e-4, verbose: bool = False
) -> Tensor:
    r"""
    Asserts that ``get`` equals ``expect`` within ``max_err`` and returns the
    value of ``expect``.

    :param get: tensor to be checked.
    :param expect: tensor with expected values.
    :param max_err: tolerance for asserting two floats equal. Default: 1e-4
    :param verbose: print details when the tensors are not equal. Default: False

    Examples:

    .. testcode::

        import megengine.functional as F
        from megengine import tensor

        get = tensor([1.0, 2.0])
        max_err = 0.1
        expect = get + max_err / 2.0
        val = F.assert_equal(expect, get, max_err=max_err)
        print(val.numpy())

    Outputs:

    .. testoutput::

        [1.05 2.05]
    """
    return mgb.opr.assert_equal(get, expect, maxerr=max_err, verbose=verbose)
@wrap_io_tensor
def indexing_one_hot(
    src: Tensor, index: Tensor, axis: int = 1, keepdims=False
) -> Tensor:
    r"""
    Picks, along ``axis``, the element of ``src`` selected by ``index``
    (one-hot indexing).

    :param src: input data tensor.
    :param index: index tensor.
    :param axis: the axis on ``src`` that ``index`` indexes into. Default: 1
    :param keepdims: whether to keep the indexed axis in the result. Default: ``False``

    Examples:

    .. testcode::

        import megengine.functional as F
        from megengine import tensor

        src = tensor([[1.0, 2.0]])
        index = tensor([0])
        val = F.indexing_one_hot(src, index)
        print(val.numpy())

    .. testoutput::

        [1.]
    """
    return mgb.opr.indexing_one_hot(src, axis, index, keepdims=keepdims)
|
[
"megengine._internal.opr.elem.exp",
"megengine._internal.opr.pooling",
"megengine._internal.opr.add_axis",
"megengine._internal.opr.assert_equal",
"megengine._internal.opr.elemwise",
"megengine._internal.opr.reshape",
"megengine._internal.opr.indexing_one_hot",
"megengine._internal.opr.batch_norm",
"megengine._internal.opr.advanced_indexing",
"megengine._internal.opr.concat",
"megengine._internal.opr.warp_perspective",
"megengine._internal.make_immutable",
"megengine._internal.opr.mark_no_broadcast_elemwise",
"megengine._internal.opr.identity",
"megengine._internal.opr.batched_matrix_mul",
"megengine._internal.opr.batch_norm_no_statistic",
"megengine._internal.opr.eye",
"megengine._internal.opr.matrix_mul",
"megengine._internal.opr.broadcast"
] |
[((1169, 1217), 'megengine._internal.opr.matrix_mul', 'mgb.opr.matrix_mul', (['inp', 'weight'], {'transposeB': '(True)'}), '(inp, weight, transposeB=True)\n', (1187, 1217), True, 'import megengine._internal as mgb\n'), ((7082, 7204), 'megengine._internal.opr.pooling', 'mgb.opr.pooling', (['inp'], {'mode': 'mode', 'format': '"""NCHW"""', 'stride_h': 'sh', 'stride_w': 'sw', 'pad_h': 'ph', 'pad_w': 'pw', 'window_h': 'kh', 'window_w': 'kw'}), "(inp, mode=mode, format='NCHW', stride_h=sh, stride_w=sw,\n pad_h=ph, pad_w=pw, window_h=kh, window_w=kw)\n", (7097, 7204), True, 'import megengine._internal as mgb\n'), ((8075, 8197), 'megengine._internal.opr.pooling', 'mgb.opr.pooling', (['inp'], {'mode': 'mode', 'format': '"""NCHW"""', 'stride_h': 'sh', 'stride_w': 'sw', 'pad_h': 'ph', 'pad_w': 'pw', 'window_h': 'kh', 'window_w': 'kw'}), "(inp, mode=mode, format='NCHW', stride_h=sh, stride_w=sw,\n pad_h=ph, pad_w=pw, window_h=kh, window_w=kw)\n", (8090, 8197), True, 'import megengine._internal as mgb\n'), ((12094, 12133), 'megengine._internal.opr.mark_no_broadcast_elemwise', 'mgb.opr.mark_no_broadcast_elemwise', (['inp'], {}), '(inp)\n', (12128, 12133), True, 'import megengine._internal as mgb\n'), ((14310, 14381), 'megengine._internal.make_immutable', 'mgb.make_immutable', ([], {'value': '(0)', 'comp_node': 'comp_node', 'comp_graph': 'comp_graph'}), '(value=0, comp_node=comp_node, comp_graph=comp_graph)\n', (14328, 14381), True, 'import megengine._internal as mgb\n'), ((14457, 14528), 'megengine._internal.make_immutable', 'mgb.make_immutable', ([], {'value': '(1)', 'comp_node': 'comp_node', 'comp_graph': 'comp_graph'}), '(value=1, comp_node=comp_node, comp_graph=comp_graph)\n', (14475, 14528), True, 'import megengine._internal as mgb\n'), ((16421, 16541), 'megengine._internal.opr.warp_perspective', 'mgb.opr.warp_perspective', (['inp', 'M', 'dsize'], {'bmode': 'border_mode', 'border_val': 'border_val', 'imode': 'interp_mode', 'format': '"""NCHW"""'}), "(inp, M, dsize, 
bmode=border_mode, border_val=\n border_val, imode=interp_mode, format='NCHW')\n", (16445, 16541), True, 'import megengine._internal as mgb\n'), ((17671, 17744), 'megengine._internal.opr.eye', 'mgb.opr.eye', (['(n, m)'], {'dtype': 'dtype', 'comp_node': 'device', 'comp_graph': 'comp_graph'}), '((n, m), dtype=dtype, comp_node=device, comp_graph=comp_graph)\n', (17682, 17744), True, 'import megengine._internal as mgb\n'), ((18554, 18584), 'megengine._internal.opr.matrix_mul', 'mgb.opr.matrix_mul', (['inp1', 'inp2'], {}), '(inp1, inp2)\n', (18572, 18584), True, 'import megengine._internal as mgb\n'), ((19745, 19783), 'megengine._internal.opr.batched_matrix_mul', 'mgb.opr.batched_matrix_mul', (['inp1', 'inp2'], {}), '(inp1, inp2)\n', (19771, 19783), True, 'import megengine._internal as mgb\n'), ((24039, 24114), 'megengine._internal.opr.warp_perspective', 'mgb.opr.warp_perspective', (['inp', 'weight', 'dsize'], {'imode': '"""LINEAR"""', 'format': '"""NCHW"""'}), "(inp, weight, dsize, imode='LINEAR', format='NCHW')\n", (24063, 24114), True, 'import megengine._internal as mgb\n'), ((25556, 25577), 'megengine._internal.opr.identity', 'mgb.opr.identity', (['inp'], {}), '(inp)\n', (25572, 25577), True, 'import megengine._internal as mgb\n'), ((30069, 30135), 'megengine._internal.opr.assert_equal', 'mgb.opr.assert_equal', (['get', 'expect'], {'maxerr': 'max_err', 'verbose': 'verbose'}), '(get, expect, maxerr=max_err, verbose=verbose)\n', (30089, 30135), True, 'import megengine._internal as mgb\n'), ((30828, 30889), 'megengine._internal.opr.indexing_one_hot', 'mgb.opr.indexing_one_hot', (['src', 'axis', 'index'], {'keepdims': 'keepdims'}), '(src, axis, index, keepdims=keepdims)\n', (30852, 30889), True, 'import megengine._internal as mgb\n'), ((8468, 8504), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MAX"""'}), "(inp, 0, mode='MAX')\n", (8484, 8504), True, 'import megengine._internal as mgb\n'), ((8789, 8825), 
'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MAX"""'}), "(inp, 0, mode='MAX')\n", (8805, 8825), True, 'import megengine._internal as mgb\n'), ((10915, 10936), 'megengine._internal.opr.elem.exp', 'mgb.opr.elem.exp', (['inp'], {}), '(inp)\n', (10931, 10936), True, 'import megengine._internal as mgb\n'), ((21594, 21618), 'megengine._internal.opr.add_axis', 'mgb.opr.add_axis', (['inp', '(3)'], {}), '(inp, 3)\n', (21610, 21618), True, 'import megengine._internal as mgb\n'), ((22711, 22755), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[dsize[0], dsize[1]]'], {'axis': '(0)'}), '([dsize[0], dsize[1]], axis=0)\n', (22725, 22755), True, 'import megengine._internal as mgb\n'), ((23585, 23632), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['weight', '(inp.shape[0], 3, 3)'], {}), '(weight, (inp.shape[0], 3, 3))\n', (23602, 23632), True, 'import megengine._internal as mgb\n'), ((23980, 24027), 'megengine._internal.opr.broadcast', 'mgb.opr.broadcast', (['weight', '(inp.shape[0], 3, 3)'], {}), '(weight, (inp.shape[0], 3, 3))\n', (23997, 24027), True, 'import megengine._internal as mgb\n'), ((24154, 24190), 'megengine._internal.opr.reshape', 'mgb.opr.reshape', (['ret', 'ret.shape[0:3]'], {}), '(ret, ret.shape[0:3])\n', (24169, 24190), True, 'import megengine._internal as mgb\n'), ((8516, 8552), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MIN"""'}), "(inp, 0, mode='MIN')\n", (8532, 8552), True, 'import megengine._internal as mgb\n'), ((8845, 8881), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['inp', '(0)'], {'mode': '"""MIN"""'}), "(inp, 0, mode='MIN')\n", (8861, 8881), True, 'import megengine._internal as mgb\n'), ((10852, 10873), 'megengine._internal.opr.elem.exp', 'mgb.opr.elem.exp', (['inp'], {}), '(inp)\n', (10868, 10873), True, 'import megengine._internal as mgb\n'), ((12985, 13130), 'megengine._internal.opr.batch_norm', 'mgb.opr.batch_norm', (['inp', 
'weight', 'bias', 'running_mean', 'running_var'], {'param_dim': '"""DIM_1C11"""', 'fwd_mode': 'fwdmode', 'epsilon': 'eps', 'avg_factor': 'avg_factor'}), "(inp, weight, bias, running_mean, running_var, param_dim=\n 'DIM_1C11', fwd_mode=fwdmode, epsilon=eps, avg_factor=avg_factor)\n", (13003, 13130), True, 'import megengine._internal as mgb\n'), ((13329, 13459), 'megengine._internal.opr.batch_norm_no_statistic', 'mgb.opr.batch_norm_no_statistic', (['inp', 'weight', 'bias'], {'param_dim': '"""DIM_1C11"""', 'fwd_mode': 'fwdmode', 'epsilon': 'eps', 'avg_factor': 'avg_factor'}), "(inp, weight, bias, param_dim='DIM_1C11',\n fwd_mode=fwdmode, epsilon=eps, avg_factor=avg_factor)\n", (13360, 13459), True, 'import megengine._internal as mgb\n'), ((22588, 22654), 'megengine._internal.opr.elemwise', 'mgb.opr.elemwise', (['(inp.shape[i + 2] * scale_factor[i])'], {'mode': '"""FLOOR"""'}), "(inp.shape[i + 2] * scale_factor[i], mode='FLOOR')\n", (22604, 22654), True, 'import megengine._internal as mgb\n'), ((23357, 23397), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[wscale, [0, 0]]'], {'axis': '(0)'}), '([wscale, [0, 0]], axis=0)\n', (23371, 23397), True, 'import megengine._internal as mgb\n'), ((23427, 23469), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[[0], hscale, [0]]'], {'axis': '(0)'}), '([[0], hscale, [0]], axis=0)\n', (23441, 23469), True, 'import megengine._internal as mgb\n'), ((23501, 23550), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[row0, row1, [[0, 0, 1]]]'], {'axis': '(0)'}), '([row0, row1, [[0, 0, 1]]], axis=0)\n', (23515, 23550), True, 'import megengine._internal as mgb\n'), ((23720, 23777), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[wscale, [0], 0.5 * wscale - 0.5]'], {'axis': '(0)'}), '([wscale, [0], 0.5 * wscale - 0.5], axis=0)\n', (23734, 23777), True, 'import megengine._internal as mgb\n'), ((23807, 23864), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[[0], hscale, 0.5 * hscale - 0.5]'], {'axis': 
'(0)'}), '([[0], hscale, 0.5 * hscale - 0.5], axis=0)\n', (23821, 23864), True, 'import megengine._internal as mgb\n'), ((23896, 23945), 'megengine._internal.opr.concat', 'mgb.opr.concat', (['[row0, row1, [[0, 0, 1]]]'], {'axis': '(0)'}), '([row0, row1, [[0, 0, 1]]], axis=0)\n', (23910, 23945), True, 'import megengine._internal as mgb\n'), ((26397, 26430), 'megengine._internal.opr.advanced_indexing', 'mgb.opr.advanced_indexing', (['weight'], {}), '(weight)\n', (26422, 26430), True, 'import megengine._internal as mgb\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from test.traced_module.test_tflite import _test_convert_result
from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr
import megengine as mge
import megengine.module as M
import numpy as np
from megengine.core.tensor import dtype
from megengine.core.tensor.dtype import _builtin_quant_dtypes
from megengine.module.quant_dequant import QuantStub
from megengine.quantization.quantize import quantize_qat
from megengine.quantization.utils import create_qparams
from megengine.traced_module.fake_quant import FakeQuantize
from .tm_utils import get_traced_module
# Maximum tolerated error when comparing converted results in these tests.
max_error = 1e-4
# NOTE(review): scratch-file basename; its use is not visible in this chunk —
# presumably the dump path for converted models. Confirm against callers.
tmp_file = "test_model"
def get_qat_net(inp_dtype, net, num_inp=1, shape=(1, 16, 32, 32)):
    """Quantize ``net`` for QAT and build ``num_inp`` random qint8 inputs.

    Each input carries the scale and dtype meta of ``inp_dtype`` in its
    qparams. Returns ``(qat_net, inputs)``.
    """
    qat_net = quantize_qat(net)
    inps = []
    for _ in range(num_inp):
        raw = (mge.tensor(np.random.random(shape)) * 16).astype(inp_dtype)
        inp = mge.tensor(dtype.convert_from_qint8(raw.numpy()))
        inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
        inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
        inps.append(inp)
    return qat_net, inps
def test_qat_conv_qint8():
    """Converts a QAT conv module and checks the quantized result."""

    class QConvOpr(M.Module):
        def __init__(self):
            super().__init__()
            self.normal_conv = M.Conv2d(
                3, 30, 3, stride=(2, 3), padding=(3, 1), dilation=(2, 2),
            )
            self.normal_conv.bias = mge.Parameter(
                np.random.random(self.normal_conv.bias.shape).astype(np.float32)
            )

        def forward(self, x):
            return self.normal_conv(x)

    qat_net = quantize_qat(QConvOpr())

    inp_dtype = dtype.qint8(16.0 / 128)
    raw = (mge.tensor(np.random.random((1, 3, 224, 224))) * 16).astype(inp_dtype)
    inp = mge.tensor(dtype.convert_from_qint8(raw.numpy()))
    inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
    inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]

    traced_module, tm_result = get_traced_module(qat_net, inp)
    print(traced_module.flatten().graph)

    inp = inp.astype(inp_dtype)
    scale = traced_module.graph.outputs[0].qparams.scale.numpy()
    _test_convert_result(
        inp,
        traced_module,
        tm_result,
        scale=scale,
        require_quantize=True,
        max_err=max_error,
    )
def test_qat_convrelu():
    """Converts a QAT ConvRelu2d module and checks the quantized result."""
    qat_net = quantize_qat(ConvRelu2dOpr())

    inp_dtype = dtype.qint8(16.0 / 128)
    raw = (mge.tensor(np.random.random((1, 3, 224, 224))) * 16).astype(inp_dtype)
    inp = mge.tensor(dtype.convert_from_qint8(raw.numpy()))
    inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
    inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]

    traced_module, tm_result = get_traced_module(qat_net, inp)

    inp = inp.astype(inp_dtype)
    scale = traced_module.graph.outputs[0].qparams.scale.numpy()
    _test_convert_result(
        inp,
        traced_module,
        tm_result,
        scale=scale,
        require_quantize=True,
        max_err=max_error,
    )
def test_qat_convbn():
    """Converts a QAT ConvBn2d module (eval mode) and checks the result."""
    net = ConvBn2dOpr()
    net.eval()
    qat_net = quantize_qat(net)

    inp_dtype = dtype.qint8(16.0 / 128)
    raw = (mge.tensor(np.random.random((1, 3, 224, 224))) * 16).astype(inp_dtype)
    inp = mge.tensor(dtype.convert_from_qint8(raw.numpy()))
    inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
    inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]

    traced_module, tm_result = get_traced_module(qat_net, inp)

    inp = inp.astype(inp_dtype)
    scale = traced_module.graph.outputs[0].qparams.scale.numpy()
    _test_convert_result(
        inp,
        traced_module,
        tm_result,
        scale=scale,
        require_quantize=True,
        max_err=max_error,
    )
def test_qat_convbnrelu():
    """Converts a QAT ConvBnRelu2d module (eval mode) and checks the result."""
    net = ConvBnRelu2dOpr()
    net.eval()
    qat_net = quantize_qat(net)

    inp_dtype = dtype.qint8(16.0 / 128)
    raw = (mge.tensor(np.random.random((1, 3, 224, 224))) * 16).astype(inp_dtype)
    inp = mge.tensor(dtype.convert_from_qint8(raw.numpy()))
    inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
    inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]

    traced_module, tm_result = get_traced_module(qat_net, inp)

    inp = inp.astype(inp_dtype)
    scale = traced_module.graph.outputs[0].qparams.scale.numpy()
    _test_convert_result(
        inp,
        traced_module,
        tm_result,
        scale=scale,
        require_quantize=True,
        max_err=max_error,
    )
def test_deconv_qint8():
net = ConvOpr("tflite_transpose")
qat_net = quantize_qat(net)
inp_dtype = dtype.qint8(16.0 / 128)
data = mge.tensor(np.random.random((1, 3, 64, 64))) * 16
data = data.astype(inp_dtype)
inp = mge.tensor(dtype.convert_from_qint8(data.numpy()))
inp.qparams.scale = mge.tensor(dtype.get_scale(inp_dtype))
inp.qparams.dtype_meta = dtype._builtin_quant_dtypes["qint8"]
traced_module, tm_result = get_traced_module(qat_net, inp)
print(traced_module.flatten().graph)
inp = inp.astype(inp_dtype)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_linear():
net = LinearOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(10, 100))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
def test_add():
class ElemwiseOpr(M.Module):
def __init__(self,):
super().__init__()
self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
self.add1 = M.Elemwise("add")
self.add2 = M.Elemwise("add")
self.add3 = M.Elemwise("add")
scale = mge.tensor((16.0 / 128.0))
self.quant_stub = QuantStub()
self.quant_stub.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
self.quant_stub1 = QuantStub()
self.quant_stub1.act_fake_quant = FakeQuantize(
_builtin_quant_dtypes["qint8"]
)
self.quant_stub1.act_fake_quant.set_qparams(
create_qparams(
dtype_meta=_builtin_quant_dtypes["qint8"],
scale=scale,
zero_point=None,
)
)
def forward(self, a):
n = self.quant_stub(mge.tensor(np.float32(10)))
data1 = self.quant_stub1(mge.tensor(self.data1))
x = self.add1(a, n)
y = self.add2(a, data1)
z = self.add3(x, y)
return z
net = ElemwiseOpr()
inp_dtype = dtype.qint8(16.0 / 128.0)
qat_net, inps = get_qat_net(inp_dtype, net, shape=(1, 3, 1, 1))
traced_module, tm_result = get_traced_module(qat_net, inps[0])
print(traced_module.flatten().graph)
out_dtype = traced_module.graph.outputs[0].qparams
scale = out_dtype.scale.numpy()
inp = inps[0].astype(inp_dtype)
_test_convert_result(
inp,
traced_module,
tm_result,
scale=scale,
require_quantize=True,
max_err=max_error,
)
|
[
"megengine.quantization.quantize.quantize_qat",
"megengine.module.Elemwise",
"megengine.traced_module.fake_quant.FakeQuantize",
"megengine.tensor",
"megengine.quantization.utils.create_qparams",
"megengine.core.tensor.dtype.get_scale",
"megengine.module.quant_dequant.QuantStub",
"megengine.core.tensor.dtype.qint8",
"megengine.module.Conv2d"
] |
[((1132, 1149), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (1144, 1149), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2064, 2081), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (2076, 2081), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2099, 2122), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (2110, 2122), False, 'from megengine.core.tensor import dtype\n'), ((2642, 2752), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (2662, 2752), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((2841, 2856), 'test.utils.ConvRelu2dOpr', 'ConvRelu2dOpr', ([], {}), '()\n', (2854, 2856), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((2871, 2888), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (2883, 2888), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((2905, 2928), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (2916, 2928), False, 'from megengine.core.tensor import dtype\n'), ((3407, 3517), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (3427, 3517), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((3604, 3617), 'test.utils.ConvBn2dOpr', 'ConvBn2dOpr', ([], {}), '()\n', (3615, 3617), False, 
'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((3647, 3664), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (3659, 3664), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((3681, 3704), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (3692, 3704), False, 'from megengine.core.tensor import dtype\n'), ((4183, 4293), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (4203, 4293), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((4384, 4401), 'test.utils.ConvBnRelu2dOpr', 'ConvBnRelu2dOpr', ([], {}), '()\n', (4399, 4401), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((4431, 4448), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (4443, 4448), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((4465, 4488), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (4476, 4488), False, 'from megengine.core.tensor import dtype\n'), ((4967, 5077), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (4987, 5077), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((5166, 5193), 'test.utils.ConvOpr', 'ConvOpr', (['"""tflite_transpose"""'], {}), "('tflite_transpose')\n", (5173, 5193), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, 
ConvRelu2dOpr, LinearOpr\n'), ((5208, 5225), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {}), '(net)\n', (5220, 5225), False, 'from megengine.quantization.quantize import quantize_qat\n'), ((5243, 5266), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128)'], {}), '(16.0 / 128)\n', (5254, 5266), False, 'from megengine.core.tensor import dtype\n'), ((5784, 5894), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (5804, 5894), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((5977, 5988), 'test.utils.LinearOpr', 'LinearOpr', ([], {}), '()\n', (5986, 5988), False, 'from test.utils import ConvBn2dOpr, ConvBnRelu2dOpr, ConvOpr, ConvRelu2dOpr, LinearOpr\n'), ((6005, 6030), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (6016, 6030), False, 'from megengine.core.tensor import dtype\n'), ((6335, 6445), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n require_quantize=True, max_err=max_error)\n', (6355, 6445), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((8073, 8098), 'megengine.core.tensor.dtype.qint8', 'dtype.qint8', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (8084, 8098), False, 'from megengine.core.tensor import dtype\n'), ((8406, 8516), 'test.traced_module.test_tflite._test_convert_result', '_test_convert_result', (['inp', 'traced_module', 'tm_result'], {'scale': 'scale', 'require_quantize': '(True)', 'max_err': 'max_error'}), '(inp, traced_module, tm_result, scale=scale,\n 
require_quantize=True, max_err=max_error)\n', (8426, 8516), False, 'from test.traced_module.test_tflite import _test_convert_result\n'), ((2316, 2342), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (2331, 2342), False, 'from megengine.core.tensor import dtype\n'), ((3122, 3148), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (3137, 3148), False, 'from megengine.core.tensor import dtype\n'), ((3898, 3924), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (3913, 3924), False, 'from megengine.core.tensor import dtype\n'), ((4682, 4708), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (4697, 4708), False, 'from megengine.core.tensor import dtype\n'), ((5458, 5484), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (5473, 5484), False, 'from megengine.core.tensor import dtype\n'), ((1397, 1423), 'megengine.core.tensor.dtype.get_scale', 'dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (1412, 1423), False, 'from megengine.core.tensor import dtype\n'), ((1696, 1762), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'padding': '(3, 1)', 'dilation': '(2, 2)'}), '(3, 30, 3, stride=(2, 3), padding=(3, 1), dilation=(2, 2))\n', (1704, 1762), True, 'import megengine.module as M\n'), ((2145, 2179), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (2161, 2179), True, 'import numpy as np\n'), ((2951, 2985), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (2967, 2985), True, 'import numpy as np\n'), ((3727, 3761), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (3743, 3761), True, 'import numpy as np\n'), ((4511, 4545), 'numpy.random.random', 
'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (4527, 4545), True, 'import numpy as np\n'), ((5289, 5321), 'numpy.random.random', 'np.random.random', (['(1, 3, 64, 64)'], {}), '((1, 3, 64, 64))\n', (5305, 5321), True, 'import numpy as np\n'), ((6776, 6793), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6786, 6793), True, 'import megengine.module as M\n'), ((6818, 6835), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6828, 6835), True, 'import megengine.module as M\n'), ((6860, 6877), 'megengine.module.Elemwise', 'M.Elemwise', (['"""add"""'], {}), "('add')\n", (6870, 6877), True, 'import megengine.module as M\n'), ((6899, 6923), 'megengine.tensor', 'mge.tensor', (['(16.0 / 128.0)'], {}), '(16.0 / 128.0)\n', (6909, 6923), True, 'import megengine as mge\n'), ((6956, 6967), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (6965, 6967), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((7013, 7057), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (7025, 7057), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((7372, 7383), 'megengine.module.quant_dequant.QuantStub', 'QuantStub', ([], {}), '()\n', (7381, 7383), False, 'from megengine.module.quant_dequant import QuantStub\n'), ((7430, 7474), 'megengine.traced_module.fake_quant.FakeQuantize', 'FakeQuantize', (["_builtin_quant_dtypes['qint8']"], {}), "(_builtin_quant_dtypes['qint8'])\n", (7442, 7474), False, 'from megengine.traced_module.fake_quant import FakeQuantize\n'), ((1220, 1243), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1236, 1243), True, 'import numpy as np\n'), ((7160, 7251), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 
'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (7174, 7251), False, 'from megengine.quantization.utils import create_qparams\n'), ((7578, 7669), 'megengine.quantization.utils.create_qparams', 'create_qparams', ([], {'dtype_meta': "_builtin_quant_dtypes['qint8']", 'scale': 'scale', 'zero_point': 'None'}), "(dtype_meta=_builtin_quant_dtypes['qint8'], scale=scale,\n zero_point=None)\n", (7592, 7669), False, 'from megengine.quantization.utils import create_qparams\n'), ((7887, 7909), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7897, 7909), True, 'import megengine as mge\n'), ((6632, 6657), 'numpy.ones', 'np.ones', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (6639, 6657), True, 'import numpy as np\n'), ((6702, 6732), 'numpy.random.random', 'np.random.random', (['(1, 3, 1, 1)'], {}), '((1, 3, 1, 1))\n', (6718, 6732), True, 'import numpy as np\n'), ((7833, 7847), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7843, 7847), True, 'import numpy as np\n'), ((1861, 1906), 'numpy.random.random', 'np.random.random', (['self.normal_conv.bias.shape'], {}), '(self.normal_conv.bias.shape)\n', (1877, 1906), True, 'import numpy as np\n')]
|
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.core.tensor.raw_tensor import RawTensor
from megengine.module import Module
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter(1.23, dtype=np.float32)
def forward(self, x):
x = x * self.a
return x
def test_save_load():
net = Simple()
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.clear_grad()
gm = ad.GradManager().attach(net.parameters())
data = tensor([2.34])
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
model_name = "simple.pkl"
print("save to {}".format(model_name))
mge.save(
{
"name": "simple",
"state_dict": net.state_dict(),
"opt_state": optim.state_dict(),
},
model_name,
)
# Load param to cpu
checkpoint = mge.load(model_name, map_location="cpu0")
device_save = mge.get_default_device()
mge.set_default_device("cpu0")
net = Simple()
net.load_state_dict(checkpoint["state_dict"])
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.load_state_dict(checkpoint["opt_state"])
print("load done")
with gm:
loss = net([1.23])
gm.backward(loss)
optim.step()
# Restore device
mge.set_default_device(device_save)
|
[
"megengine.get_default_device",
"megengine.load",
"megengine.tensor",
"megengine.set_default_device",
"megengine.autodiff.GradManager",
"megengine.Parameter"
] |
[((636, 650), 'megengine.tensor', 'tensor', (['[2.34]'], {}), '([2.34])\n', (642, 650), False, 'from megengine import Parameter, tensor\n'), ((1031, 1072), 'megengine.load', 'mge.load', (['model_name'], {'map_location': '"""cpu0"""'}), "(model_name, map_location='cpu0')\n", (1039, 1072), True, 'import megengine as mge\n'), ((1091, 1115), 'megengine.get_default_device', 'mge.get_default_device', ([], {}), '()\n', (1113, 1115), True, 'import megengine as mge\n'), ((1120, 1150), 'megengine.set_default_device', 'mge.set_default_device', (['"""cpu0"""'], {}), "('cpu0')\n", (1142, 1150), True, 'import megengine as mge\n'), ((1470, 1505), 'megengine.set_default_device', 'mge.set_default_device', (['device_save'], {}), '(device_save)\n', (1492, 1505), True, 'import megengine as mge\n'), ((339, 372), 'megengine.Parameter', 'Parameter', (['(1.23)'], {'dtype': 'np.float32'}), '(1.23, dtype=np.float32)\n', (348, 372), False, 'from megengine import Parameter, tensor\n'), ((582, 598), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (596, 598), True, 'import megengine.autodiff as ad\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
from megengine import Tensor
from megengine.core._imperative_rt.core2 import (
_set_drop_flag,
_set_swap_flag,
get_option,
set_option,
)
from megengine.module import Linear, Module
from megengine.optimizer import SGD
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
def calculate_precision(data: np.ndarray, pred: np.ndarray) -> float:
""" Calculate precision for given data and prediction.
:type data: [[x, y], ...]
:param data: Input data
:type pred: [[x_pred, y_pred], ...]
:param pred: Network output data
"""
correct = 0
assert len(data) == len(pred)
for inp_data, pred_output in zip(data, pred):
label = 0 if np.prod(inp_data) < 0 else 1
pred_label = np.argmax(pred_output)
if pred_label == label:
correct += 1
return float(correct) / len(data)
class XORNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
def forward(self, x):
y = self.fc0(x)
x._swap_out()
x = F.tanh(y)
y = self.fc1(x)
x = F.tanh(y)
x = self.fc2(x)
y = (x + x) / 2 # in order to test drop()
y._drop()
return y
def test_training_converge_with_swap_and_drop():
_set_swap_flag(True)
_set_drop_flag(True)
old_buffer_length = get_option("buffer_length")
set_option("buffer_length", 0)
net = XORNet()
opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
gm = ad.GradManager().attach(net.parameters())
def train(data, label):
with gm:
pred = net(data)
loss = F.nn.cross_entropy(pred, label)
gm.backward(loss)
return loss
def infer(data):
return net(data)
train_dataset = minibatch_generator()
losses = []
for data, label in itertools.islice(train_dataset, 2000):
data = Tensor(data, dtype=np.float32)
label = Tensor(label, dtype=np.int32)
opt.clear_grad()
loss = train(data, label)
opt.step()
losses.append(loss.numpy())
assert np.mean(losses[-100:]) < 0.1, "Final training Loss must be low enough"
ngrid = 10
x = np.linspace(-1.0, 1.0, ngrid)
xx, yy = np.meshgrid(x, x)
xx = xx.reshape((ngrid * ngrid, 1))
yy = yy.reshape((ngrid * ngrid, 1))
data = mge.tensor(np.concatenate((xx, yy), axis=1).astype(np.float32))
pred = infer(Tensor(data)).numpy()
precision = calculate_precision(data.numpy(), pred)
assert precision == 1.0, "Test precision must be high enough, get {}".format(
precision
)
_set_swap_flag(False)
_set_drop_flag(False)
set_option("buffer_length", old_buffer_length)
|
[
"megengine.core._imperative_rt.core2._set_swap_flag",
"megengine.core._imperative_rt.core2.set_option",
"megengine.Tensor",
"megengine.module.Linear",
"megengine.functional.nn.cross_entropy",
"megengine.core._imperative_rt.core2._set_drop_flag",
"megengine.autodiff.GradManager",
"megengine.functional.tanh",
"megengine.core._imperative_rt.core2.get_option"
] |
[((2431, 2451), 'megengine.core._imperative_rt.core2._set_swap_flag', '_set_swap_flag', (['(True)'], {}), '(True)\n', (2445, 2451), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2456, 2476), 'megengine.core._imperative_rt.core2._set_drop_flag', '_set_drop_flag', (['(True)'], {}), '(True)\n', (2470, 2476), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2501, 2528), 'megengine.core._imperative_rt.core2.get_option', 'get_option', (['"""buffer_length"""'], {}), "('buffer_length')\n", (2511, 2528), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((2533, 2563), 'megengine.core._imperative_rt.core2.set_option', 'set_option', (['"""buffer_length"""', '(0)'], {}), "('buffer_length', 0)\n", (2543, 2563), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3014, 3051), 'itertools.islice', 'itertools.islice', (['train_dataset', '(2000)'], {}), '(train_dataset, 2000)\n', (3030, 3051), False, 'import itertools\n'), ((3366, 3395), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', 'ngrid'], {}), '(-1.0, 1.0, ngrid)\n', (3377, 3395), True, 'import numpy as np\n'), ((3409, 3426), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (3420, 3426), True, 'import numpy as np\n'), ((3789, 3810), 'megengine.core._imperative_rt.core2._set_swap_flag', '_set_swap_flag', (['(False)'], {}), '(False)\n', (3803, 3810), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3815, 3836), 'megengine.core._imperative_rt.core2._set_drop_flag', '_set_drop_flag', (['(False)'], {}), '(False)\n', (3829, 3836), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((3841, 3887), 
'megengine.core._imperative_rt.core2.set_option', 'set_option', (['"""buffer_length"""', 'old_buffer_length'], {}), "('buffer_length', old_buffer_length)\n", (3851, 3887), False, 'from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag, get_option, set_option\n'), ((875, 900), 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), '((batch_size, 2))\n', (883, 900), True, 'import numpy as np\n'), ((917, 953), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (925, 953), True, 'import numpy as np\n'), ((1663, 1685), 'numpy.argmax', 'np.argmax', (['pred_output'], {}), '(pred_output)\n', (1672, 1685), True, 'import numpy as np\n'), ((1932, 1982), 'megengine.module.Linear', 'Linear', (['self.num_class', 'self.mid_layers'], {'bias': '(True)'}), '(self.num_class, self.mid_layers, bias=True)\n', (1938, 1982), False, 'from megengine.module import Linear, Module\n'), ((2002, 2053), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.mid_layers'], {'bias': '(True)'}), '(self.mid_layers, self.mid_layers, bias=True)\n', (2008, 2053), False, 'from megengine.module import Linear, Module\n'), ((2074, 2124), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.num_class'], {'bias': '(True)'}), '(self.mid_layers, self.num_class, bias=True)\n', (2080, 2124), False, 'from megengine.module import Linear, Module\n'), ((2210, 2219), 'megengine.functional.tanh', 'F.tanh', (['y'], {}), '(y)\n', (2216, 2219), True, 'import megengine.functional as F\n'), ((2256, 2265), 'megengine.functional.tanh', 'F.tanh', (['y'], {}), '(y)\n', (2262, 2265), True, 'import megengine.functional as F\n'), ((3068, 3098), 'megengine.Tensor', 'Tensor', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (3074, 3098), False, 'from megengine import Tensor\n'), ((3115, 3144), 'megengine.Tensor', 'Tensor', (['label'], {'dtype': 'np.int32'}), '(label, dtype=np.int32)\n', (3121, 3144), False, 'from megengine 
import Tensor\n'), ((3271, 3293), 'numpy.mean', 'np.mean', (['losses[-100:]'], {}), '(losses[-100:])\n', (3278, 3293), True, 'import numpy as np\n'), ((2666, 2682), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (2680, 2682), True, 'import megengine.autodiff as ad\n'), ((2802, 2833), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['pred', 'label'], {}), '(pred, label)\n', (2820, 2833), True, 'import megengine.functional as F\n'), ((1613, 1630), 'numpy.prod', 'np.prod', (['inp_data'], {}), '(inp_data)\n', (1620, 1630), True, 'import numpy as np\n'), ((3529, 3561), 'numpy.concatenate', 'np.concatenate', (['(xx, yy)'], {'axis': '(1)'}), '((xx, yy), axis=1)\n', (3543, 3561), True, 'import numpy as np\n'), ((3600, 3612), 'megengine.Tensor', 'Tensor', (['data'], {}), '(data)\n', (3606, 3612), False, 'from megengine import Tensor\n'), ((1065, 1082), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1079, 1082), True, 'import numpy as np\n'), ((1119, 1139), 'numpy.prod', 'np.prod', (['inp_data[i]'], {}), '(inp_data[i])\n', (1126, 1139), True, 'import numpy as np\n')]
|
# -*- coding:utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
from megengine.random import uniform
def sample_labels(labels, num_samples, label_value, ignore_label=-1):
"""sample N labels with label value = sample_labels
Args:
labels(Tensor): shape of label is (N,)
num_samples(int):
label_value(int):
Returns:
label(Tensor): label after sampling
"""
assert labels.ndim == 1, "Only tensor of dim 1 is supported."
mask = (labels == label_value)
num_class = mask.sum()
if num_class <= num_samples:
return labels
topk_tensor = F.zeros_like(labels).astype("float32")
topk_tensor[mask] = uniform(size=num_class)
_, select_inds = F.topk(topk_tensor, k=num_samples - num_class)
labels[select_inds] = ignore_label
return labels
def sample_mask_from_labels(labels, num_sample, sample_value):
"""generate mask for labels using sampling method.
Args:
labels (Tensor):
num_sample (int):
sample_value (int):
Returns:
sample_mask (Tensor)
"""
assert labels.ndim == 1, "Only tensor of dim 1 is supported."
# TODO: support bool mask
sample_mask = (labels == sample_value).astype("float32")
num_mask = sample_mask.sum().astype("int32")
if num_mask <= num_sample:
return sample_mask
random_tensor = sample_mask * uniform(size=labels.shape)
_, sampled_idx = F.topk(random_tensor, k=num_sample - num_mask)
sample_mask[sampled_idx] = F.zeros(sampled_idx.shape)
return sample_mask
|
[
"megengine.functional.zeros",
"megengine.functional.topk",
"megengine.random.uniform",
"megengine.functional.zeros_like"
] |
[((1015, 1038), 'megengine.random.uniform', 'uniform', ([], {'size': 'num_class'}), '(size=num_class)\n', (1022, 1038), False, 'from megengine.random import uniform\n'), ((1060, 1106), 'megengine.functional.topk', 'F.topk', (['topk_tensor'], {'k': '(num_samples - num_class)'}), '(topk_tensor, k=num_samples - num_class)\n', (1066, 1106), True, 'import megengine.functional as F\n'), ((1773, 1819), 'megengine.functional.topk', 'F.topk', (['random_tensor'], {'k': '(num_sample - num_mask)'}), '(random_tensor, k=num_sample - num_mask)\n', (1779, 1819), True, 'import megengine.functional as F\n'), ((1851, 1877), 'megengine.functional.zeros', 'F.zeros', (['sampled_idx.shape'], {}), '(sampled_idx.shape)\n', (1858, 1877), True, 'import megengine.functional as F\n'), ((1725, 1751), 'megengine.random.uniform', 'uniform', ([], {'size': 'labels.shape'}), '(size=labels.shape)\n', (1732, 1751), False, 'from megengine.random import uniform\n'), ((952, 972), 'megengine.functional.zeros_like', 'F.zeros_like', (['labels'], {}), '(labels)\n', (964, 972), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import Iterable, Union
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent.
Nesterov momentum is based on the formula from
`"On the importance of initialization and momentum in deep learning"
<http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
momentum: momentum factor. Default: ``0.0``
nesterov: enables Nesterov momentum. Default: ``False``
weight_decay: weight decay (L2 penalty). Default: ``0.0``
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
momentum: float = 0.0,
nesterov: bool = False,
weight_decay: float = 0.0,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if nesterov and momentum <= 0:
raise ValueError("Nesterov momentum requires a momentum")
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
self.nesterov = nesterov
self._disable_type_convert = True
def _create_state(self, param_group):
if param_group["momentum"] != 0.0:
for param in param_group["params"]:
self._add_state(param, "momentum_buffer")
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
# since `conver_inputs` is disabled for param updates,
# scalar should be explicitly tansforred to tensor
_lr = tensor(lr)
_weight_decay = tensor(weight_decay)
_momentum = tensor(momentum)
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
if inplace_mode:
_neg_lr = tensor(-lr)
c1 = tensor([1.0])
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
if inplace_mode:
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
_inplace_add_(v, grad, alpha=_momentum, beta=c1)
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
_inplace_add_(param, grad, alpha=c1, beta=_neg_lr)
continue
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
v *= _momentum
v += grad
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
param -= _lr * grad
|
[
"megengine.tensor",
"megengine.functional.inplace._inplace_add_"
] |
[((2154, 2164), 'megengine.tensor', 'tensor', (['lr'], {}), '(lr)\n', (2160, 2164), False, 'from megengine import Parameter, tensor\n'), ((2189, 2209), 'megengine.tensor', 'tensor', (['weight_decay'], {}), '(weight_decay)\n', (2195, 2209), False, 'from megengine import Parameter, tensor\n'), ((2230, 2246), 'megengine.tensor', 'tensor', (['momentum'], {}), '(momentum)\n', (2236, 2246), False, 'from megengine import Parameter, tensor\n'), ((2275, 2317), 'os.getenv', 'os.getenv', (['"""MEGENGINE_INPLACE_UPDATE"""', '"""0"""'], {}), "('MEGENGINE_INPLACE_UPDATE', '0')\n", (2284, 2317), False, 'import os\n'), ((2366, 2377), 'megengine.tensor', 'tensor', (['(-lr)'], {}), '(-lr)\n', (2372, 2377), False, 'from megengine import Parameter, tensor\n'), ((2395, 2408), 'megengine.tensor', 'tensor', (['[1.0]'], {}), '([1.0])\n', (2401, 2408), False, 'from megengine import Parameter, tensor\n'), ((2995, 3045), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['param', 'grad'], {'alpha': 'c1', 'beta': '_neg_lr'}), '(param, grad, alpha=c1, beta=_neg_lr)\n', (3008, 3045), False, 'from megengine.functional.inplace import _inplace_add_\n'), ((2781, 2829), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['v', 'grad'], {'alpha': '_momentum', 'beta': 'c1'}), '(v, grad, alpha=_momentum, beta=c1)\n', (2794, 2829), False, 'from megengine.functional.inplace import _inplace_add_\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from collections import namedtuple
from typing import Iterable, Union
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from megengine import Parameter
from megengine.amp import GradScaler
from megengine.autodiff import GradManager
from pkg_resources import packaging
from basecls.utils import registers
from .optimizer import LAMB, LARS, SGD
from .weight_decay import get_param_groups
__all__ = ["Solver", "BaseSolver", "DefaultSolver"]
# A Solver bundles everything training needs per step: the optimizer, the
# autodiff GradManager, and the (AMP) GradScaler.
Solver = namedtuple("Solver", ["optimizer", "grad_manager", "grad_scaler"])
class BaseSolver:
    """Factory interface for producing :py:class:`~Solver` objects.

    A concrete factory pairs an :py:class:`~megengine.optimizer.Optimizer`
    with a :py:class:`~megengine.autodiff.GradManager` (plus a grad scaler),
    returned together as a single :py:class:`~Solver`.
    """

    @classmethod
    def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
        """Create a solver for the given model.

        Args:
            cfg: config for training.
            model: model for training.

        Returns:
            A solver.

        Raises:
            NotImplementedError: always; subclasses must override this.
        """
        raise NotImplementedError
@registers.solvers.register()
class DefaultSolver(BaseSolver):
    """The default solver factory.

    According to ``cfg.reduce_mode``, learning rate and weight decay will be scaled
    automatically following the linear scaling rule, see
    `"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"
    <https://arxiv.org/abs/1706.02677>`_ for more details.

    It supports ``"sgd"``, ``"adam"`` and ``"adamw"``.

    Note:
        This linear scaling rule can only work well with SGD. We are still looking for
        the applicable scaling rule for Adam and AdamW. Thus we recommend keeping default
        training settings (like learning rate and world size) when using Adam and AdamW.
    """

    @classmethod
    def build(cls, cfg: ConfigDict, model: M.Module) -> Solver:
        """Build function with the linear scaling strategy.

        Args:
            cfg: config for training.
            model: model for training.

        Returns:
            A solver.
        """
        amp_cfg = cfg.amp
        cfg = cfg.solver
        world_size = dist.get_world_size()

        # Linear scaling rule: base lr is multiplied by the number of workers.
        scaled_lr = cfg.basic_lr * world_size
        param_groups = get_param_groups(model, cfg.weight_decay)
        opt = cls.build_optimizer(cfg, param_groups, scaled_lr, 0)

        # Gradient manager; allreduce callbacks only make sense when distributed.
        gm = GradManager()
        cbs = [dist.make_allreduce_cb("mean", dist.WORLD)] if world_size > 1 else None
        gm.attach(model.parameters(), callbacks=cbs)

        # AMP loss scaler: dynamic scaling grows periodically, fixed never does.
        if amp_cfg.dynamic_scale:
            scaler = GradScaler(init_scale=65536.0, growth_interval=2000)
        else:
            scaler = GradScaler(init_scale=128.0, growth_interval=0)
        return Solver(opt, gm, scaler)

    @classmethod
    def build_optimizer(
        cls, cfg: ConfigDict, params: Union[Iterable[Parameter], dict], lr: float, wd: float
    ) -> optim.Optimizer:
        """Build optimizer according to training config.

        Args:
            cfg: config for training.
            params: iterable of parameters to optimize or dicts defining parameter groups.
            lr: learning rate.
            wd: weight decay (L2, penalty).

        Returns:
            An optimizer.
        """
        name = cfg.optimizer
        if name == "adam":
            return optim.Adam(params, lr=lr, weight_decay=wd, betas=cfg.betas)
        if name == "adamw":
            return optim.AdamW(params, lr=lr, weight_decay=wd, betas=cfg.betas)
        if name == "lamb":
            return LAMB(
                params, lr=lr, weight_decay=wd, betas=cfg.betas, always_adapt=cfg.always_adapt
            )
        if name == "lars":
            return LARS(
                params,
                lr=lr,
                weight_decay=wd,
                momentum=cfg.momentum,
                nesterov=cfg.nesterov,
                always_adapt=cfg.always_adapt,
            )
        if name == "sgd":
            # Older MegEngine releases need the locally patched SGD implementation.
            if packaging.version.parse(mge.__version__) < packaging.version.parse("1.7.0"):
                return SGD(
                    params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=cfg.nesterov
                )
            return optim.SGD(
                params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=cfg.nesterov
            )
        raise NotImplementedError(f"Optimizer '{cfg.optimizer}' not supported")
|
[
"megengine.optimizer.SGD",
"megengine.optimizer.Adam",
"megengine.amp.GradScaler",
"megengine.optimizer.AdamW",
"megengine.autodiff.GradManager",
"megengine.distributed.make_allreduce_cb",
"megengine.distributed.get_world_size"
] |
[((649, 715), 'collections.namedtuple', 'namedtuple', (['"""Solver"""', "['optimizer', 'grad_manager', 'grad_scaler']"], {}), "('Solver', ['optimizer', 'grad_manager', 'grad_scaler'])\n", (659, 715), False, 'from collections import namedtuple\n'), ((1267, 1295), 'basecls.utils.registers.solvers.register', 'registers.solvers.register', ([], {}), '()\n', (1293, 1295), False, 'from basecls.utils import registers\n'), ((2331, 2352), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2350, 2352), True, 'import megengine.distributed as dist\n'), ((2618, 2631), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2629, 2631), False, 'from megengine.autodiff import GradManager\n'), ((2844, 2896), 'megengine.amp.GradScaler', 'GradScaler', ([], {'init_scale': '(65536.0)', 'growth_interval': '(2000)'}), '(init_scale=65536.0, growth_interval=2000)\n', (2854, 2896), False, 'from megengine.amp import GradScaler\n'), ((2951, 2998), 'megengine.amp.GradScaler', 'GradScaler', ([], {'init_scale': '(128.0)', 'growth_interval': '(0)'}), '(init_scale=128.0, growth_interval=0)\n', (2961, 2998), False, 'from megengine.amp import GradScaler\n'), ((3614, 3673), 'megengine.optimizer.Adam', 'optim.Adam', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 'betas': 'cfg.betas'}), '(params, lr=lr, weight_decay=wd, betas=cfg.betas)\n', (3624, 3673), True, 'import megengine.optimizer as optim\n'), ((2653, 2695), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""mean"""', 'dist.WORLD'], {}), "('mean', dist.WORLD)\n", (2675, 2695), True, 'import megengine.distributed as dist\n'), ((3732, 3792), 'megengine.optimizer.AdamW', 'optim.AdamW', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 'betas': 'cfg.betas'}), '(params, lr=lr, weight_decay=wd, betas=cfg.betas)\n', (3743, 3792), True, 'import megengine.optimizer as optim\n'), ((4538, 4630), 'megengine.optimizer.SGD', 'optim.SGD', (['params'], {'lr': 'lr', 'weight_decay': 'wd', 
'momentum': 'cfg.momentum', 'nesterov': 'cfg.nesterov'}), '(params, lr=lr, weight_decay=wd, momentum=cfg.momentum, nesterov=\n cfg.nesterov)\n', (4547, 4630), True, 'import megengine.optimizer as optim\n'), ((4299, 4339), 'pkg_resources.packaging.version.parse', 'packaging.version.parse', (['mge.__version__'], {}), '(mge.__version__)\n', (4322, 4339), False, 'from pkg_resources import packaging\n'), ((4342, 4374), 'pkg_resources.packaging.version.parse', 'packaging.version.parse', (['"""1.7.0"""'], {}), "('1.7.0')\n", (4365, 4374), False, 'from pkg_resources import packaging\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
    """Check F.flatten output shapes for several start/end axis choices."""
    shape_a = (2, 3, 4, 5)
    shape_b = (4, 5, 6, 7)
    inp_a = np.random.random(shape_a).astype(np.float32)
    inp_b = np.random.random(shape_b).astype(np.float32)

    def check_shape(x, y):
        assert x.numpy().shape == y

    # Full flatten: a single long vector.
    cases = [
        {"input": inp_a, "output": (2 * 3 * 4 * 5,)},
        {"input": inp_b, "output": (4 * 5 * 6 * 7,)},
    ]
    opr_test(cases, F.flatten, compare_fn=check_shape)

    # Flatten everything after axis 1.
    cases = [
        {"input": inp_a, "output": (2, 3 * 4 * 5)},
        {"input": inp_b, "output": (4, 5 * 6 * 7)},
    ]
    opr_test(cases, F.flatten, compare_fn=check_shape, start_axis=1)

    # Flatten everything after axis 2.
    cases = [
        {"input": inp_a, "output": (2, 3, 4 * 5)},
        {"input": inp_b, "output": (4, 5, 6 * 7)},
    ]
    opr_test(cases, F.flatten, compare_fn=check_shape, start_axis=2)

    # Flatten only the middle axes 1..2.
    cases = [
        {"input": inp_a, "output": (2, 3 * 4, 5)},
        {"input": inp_b, "output": (4, 5 * 6, 7)},
    ]
    opr_test(cases, F.flatten, compare_fn=check_shape, start_axis=1, end_axis=2)
def test_where():
    """Exercise F.where against np.where, including inf/nan and trivial masks."""
    mask_a = np.array([[1, 0], [0, 1]], dtype=np.int32)
    x_a = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    y_a = np.array([[5, 6], [7, 8]], dtype=np.float32)
    mask_b = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
    x_b = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    y_b = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
    opr_test(
        [{"input": [mask_a, x_a, y_a]}, {"input": [mask_b, x_b, y_b]}],
        F.where,
        ref_fn=np.where,
    )

    # Degenerate masks: all-ones selects x, all-zeros selects y.
    mask_c = np.array([1, 1, 1], dtype=np.int32)
    x_c = np.array([1, 3, 2], dtype=np.float32)
    y_c = np.array([5, 6, 9], dtype=np.float32)
    mask_d = np.array([0, 0, 0], dtype=np.int32)
    x_d = np.array([1, 3, 2], dtype=np.float32)
    y_d = np.array([5, 6, 9], dtype=np.float32)
    opr_test(
        [{"input": [mask_c, x_c, y_c]}, {"input": [mask_d, x_d, y_d]}],
        F.where,
        ref_fn=np.where,
    )
def test_eye():
    """Compare F.eye with np.eye for two rectangular sizes."""
    dtype = np.float32

    def ref(n, m):
        return np.eye(n, m).astype(dtype)

    cases = [{"input": [10, 20]}, {"input": [20, 30]}]
    opr_test(cases, F.eye, ref_fn=ref, dtype=dtype)
def test_concat():
    """Concatenate tensor pairs of different first-axis lengths."""

    def make(length):
        # All inputs share the trailing (2, 3) shape; only axis 0 varies.
        return np.random.random((length, 2, 3)).astype("float32")

    data1, data2, data3 = make(5), make(6), make(7)

    def run(a, b):
        return F.concat([a, b])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda a, b: np.concatenate([a, b]))
def test_matrix_mul():
    """Check F.matrix_mul against np.matmul for chained 2D shapes."""
    shapes = [(2, 3), (3, 4), (4, 5)]
    mat_a, mat_b, mat_c = (np.random.random(s).astype("float32") for s in shapes)
    cases = [{"input": [mat_a, mat_b]}, {"input": [mat_b, mat_c]}]
    opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
    """Batched matmul: every batch entry must match np.matmul of that entry."""
    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # BUG FIX: the original body was `x.numpy()[i, ...] == y`, whose
            # result was discarded — the check never actually ran. Assert
            # closeness explicitly, matching this file's other comparisons.
            assertTensorClose(x.numpy()[i, ...], y)

        # `i` is bound at call time because opr_test runs inside the loop,
        # so both closures see the current batch index.
        opr_test(
            cases,
            F.batched_matrix_mul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
def test_sort():
    """F.sort should yield sorted values plus int32 argsort indices."""
    inp_a = np.random.random((10, 3)).astype(np.float32)
    inp_b = np.random.random((12, 2)).astype(np.float32)

    def expected(arr):
        # Reference: values and indices, with indices cast to int32.
        return [np.sort(arr), np.argsort(arr).astype(np.int32)]

    cases = [
        {"input": inp_a, "output": expected(inp_a)},
        {"input": inp_b, "output": expected(inp_b)},
    ]
    opr_test(cases, F.sort)
def test_round():
    """F.round matches np.round on random 1D float inputs."""
    cases = [
        {"input": np.random.random((15,)).astype(np.float32)},
        {"input": np.random.random((25,)).astype(np.float32)},
    ]
    opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
    """Broadcasting a 2D tensor to a 3D target shape yields that shape."""
    src_a = np.random.random((20, 30)).astype(np.float32)
    dst_a = (30, 20, 30)
    src_b = np.random.random((10, 20)).astype(np.float32)
    dst_b = (20, 10, 20)

    def check_shape(x, y):
        assert x.numpy().shape == y

    cases = [
        {"input": [src_a, dst_a], "output": dst_a},
        {"input": [src_b, dst_b], "output": dst_b},
    ]
    opr_test(cases, F.broadcast_to, compare_fn=check_shape)
def test_linspace():
    """F.linspace agrees with np.linspace for ascending and descending ranges."""

    def ref(start, end, step):
        return np.linspace(start, end, step, dtype=np.float32)

    for cases in (
        [{"input": [1, 9, 9]}, {"input": [3, 10, 8]}],
        [{"input": [9, 1, 9]}, {"input": [10, 3, 8]}],
    ):
        opr_test(cases, F.linspace, ref_fn=ref)
def test_arange():
    """F.arange agrees with np.arange for int and float steps of either sign."""

    def ref(start, end, step):
        return np.arange(start, end, step, dtype=np.float32)

    for cases in (
        [{"input": [1, 9, 1]}, {"input": [2, 10, 2]}],
        [{"input": [9, 1, -1]}, {"input": [10, 2, -2]}],
        [{"input": [9.3, 1.2, -0.5]}, {"input": [10.3, 2.1, -1.7]}],
    ):
        opr_test(cases, F.arange, ref_fn=ref)
def test_add_update():
    """add_update accumulates into a Buffer; check repeated and scaled updates."""
    shape = (2, 3)
    init = np.random.random(shape).astype(np.float32)
    buf = Buffer(init)

    # Two successive in-place +1 updates on the same buffer.
    out = F.add_update(buf, 1)
    assertTensorClose(out.numpy(), init + 1)
    out = F.add_update(buf, 1)
    assertTensorClose(out.numpy(), init + 2)

    # Scaled form: alpha * dest + beta * delta + bias.
    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
    """add_update inside a traced function still mutates the closed-over Buffer."""
    init = np.random.random((2, 3)).astype(np.float32)
    acc = Buffer(init)

    @jit.trace
    def step(x):
        return F.add_update(acc, x)

    # First traced call adds zeros: acc stays at its initial value.
    step(np.zeros((2, 3)).astype(np.float32))
    # Scaled update with a zero buffer is also a no-op on acc.
    zeros_buf = Buffer(np.zeros((2, 3)).astype(np.float32))
    F.add_update(acc, zeros_buf, beta=0.1)
    # Adding ones must now yield init + 1.
    res = step(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res, init + 1)
def test_cross_entropy_with_softmax():
    """The fused op must equal cross_entropy(softmax(logits))."""

    def expect(logits, label):
        # Reference: compose the two unfused ops.
        return F.cross_entropy(F.softmax(tensor(logits)), tensor(label)).numpy()

    data1 = np.array([1, 0.5], dtype=np.float32).reshape((1, 2))
    label1 = np.array([1], dtype=np.int32).reshape((1,))
    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape((1, 3))
    label2 = np.array([1], dtype=np.int32).reshape((1,))
    cases = [
        {"input": [data1, label1], "output": expect(data1, label1)},
        {"input": [data2, label2], "output": expect(data2, label2)},
    ]
    opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
    """Cross entropy of a probability row is -log(p[label])."""

    def make_case(probs, label_idx):
        data = np.array(probs, dtype=np.float32).reshape((1, len(probs)))
        label = np.array([label_idx], dtype=np.int32).reshape((1,))
        expect = np.array([-np.log(probs[label_idx])], dtype=np.float32)
        return {"input": [data, label], "output": expect}

    cases = [make_case([0.5, 0.5], 1), make_case([0.3, 0.4, 0.3], 1)]
    opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
    """BCE on seeded random predictions matches precomputed scalar losses."""

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def check(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    # The RNG is re-seeded before each case so the expected constants hold;
    # the uniform() call order must not change.
    np.random.seed(123)
    pred1 = sigmoid(np.random.uniform(size=(2, 2)).astype(np.float32))
    target1 = np.random.uniform(size=(2, 2)).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    pred2 = sigmoid(np.random.uniform(size=(2, 3)).astype(np.float32))
    target2 = np.random.uniform(size=(2, 3)).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [pred1, target1], "output": expect1},
        {"input": [pred2, target2], "output": expect2},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=check)
@pytest.mark.skip
def test_conv_bias():
    """Check the fused quantized conv_bias_activation against a float32
    conv2d reference, using qint8 input/weight and qint32 bias dtypes."""
    inp_scale = 0.01
    w_scale = 0.02
    outp_scale = 0.1
    inp_dtype = mgb.dtype.qint8(inp_scale)
    w_dtype = mgb.dtype.qint8(w_scale)
    # Bias scale is the product of input and weight scales.
    b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
    out_dtype = mgb.dtype.qint8(outp_scale)
    def run(
        N,
        IC,
        OC,
        IH,
        IW,
        KH,
        KW,
        PH,
        PW,
        SH,
        SW,
        has_bias=True,
        nonlinear_mode="IDENTITY",
    ):
        # Random float data, quantized below into the target dtypes.
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KW, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = mgb.dtype.get_scale(inp_dtype)
        w_scale = mgb.dtype.get_scale(w_dtype)
        b_scale = mgb.dtype.get_scale(b_dtype)
        inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)
        # Dequantized copies feed the float32 reference path.
        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")
        jit.trace.enabled = True
        b_symbolic = True
        def convert_to_nchw4(var):
            # NCHW -> NCHW4: split channels into groups of 4 and move the
            # 4-element group to the innermost axis.
            return var.reshape(
                var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
            ).dimshuffle(0, 1, 3, 4, 2)
        @jit.trace(symbolic=b_symbolic)
        def run_conv2d(inp, w, b):
            # Float32 reference: plain conv2d plus optional ReLU.
            O = F.conv2d(
                inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
            )
            if nonlinear_mode == "RELU":
                return F.relu(O)
            else:
                return O
        @jit.trace(symbolic=b_symbolic)
        def run_conv_bias(inp, w, b, format="NCHW"):
            # Fused path always passes a bias tensor; zeros stand in when
            # has_bias is False.
            b = b if has_bias else np.zeros_like(b)
            if format == "NCHW4":
                inp = convert_to_nchw4(inp)
                w = convert_to_nchw4(w)
                b = F.flatten(b)
            return F.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )
        # CUDA requires the NCHW4 layout for the fused quantized op.
        format = "NCHW4" if is_cuda_available() else "NCHW"
        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        # Round-trip through the output dtype so both sides share quantization.
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
            "float32"
        )
        if format == "NCHW4":
            # Back to NCHW-group layout before comparing.
            result = result.dimshuffle(0, 1, 4, 2, 3)
        expected = F.flatten(expected)
        result = F.flatten(result)
        assertTensorClose(result.numpy(), expected.numpy())
    if not is_cuda_available():
        run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
        run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
        run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
        run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
        run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
        run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
        run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "RELU")
        run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "RELU")
|
[
"megengine.jit.trace",
"megengine.functional.add_update",
"megengine.functional.conv2d",
"megengine.tensor",
"megengine.functional.flatten",
"megengine.functional.concat",
"megengine._internal.dtype.convert_to_qint32",
"megengine._internal.dtype.convert_to_qint8",
"megengine._internal.dtype.qint8",
"megengine.Buffer",
"megengine.test.assertTensorClose",
"megengine.is_cuda_available",
"megengine.functional.relu",
"megengine._internal.dtype.get_scale",
"megengine._internal.dtype.qint32",
"megengine.Parameter",
"megengine.functional.conv_bias_activation"
] |
[((1047, 1096), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn'}), '(cases, F.flatten, compare_fn=compare_fn)\n', (1055, 1096), False, 'from helpers import opr_test\n'), ((1247, 1310), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(1)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=1)\n', (1255, 1310), False, 'from helpers import opr_test\n'), ((1459, 1522), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(2)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=2)\n', (1467, 1522), False, 'from helpers import opr_test\n'), ((1671, 1746), 'helpers.opr_test', 'opr_test', (['cases', 'F.flatten'], {'compare_fn': 'compare_fn', 'start_axis': '(1)', 'end_axis': '(2)'}), '(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)\n', (1679, 1746), False, 'from helpers import opr_test\n'), ((1780, 1822), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0], [0, 1]], dtype=np.int32)\n', (1788, 1822), True, 'import numpy as np\n'), ((1833, 1887), 'numpy.array', 'np.array', (['[[1, np.inf], [np.nan, 4]]'], {'dtype': 'np.float32'}), '([[1, np.inf], [np.nan, 4]], dtype=np.float32)\n', (1841, 1887), True, 'import numpy as np\n'), ((1898, 1942), 'numpy.array', 'np.array', (['[[5, 6], [7, 8]]'], {'dtype': 'np.float32'}), '([[5, 6], [7, 8]], dtype=np.float32)\n', (1906, 1942), True, 'import numpy as np\n'), ((1957, 2016), 'numpy.array', 'np.array', (['[[1, 0, 1], [1, 0, 0], [1, 1, 0]]'], {'dtype': 'np.int32'}), '([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)\n', (1965, 2016), True, 'import numpy as np\n'), ((2027, 2098), 'numpy.array', 'np.array', (['[[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]]'], {'dtype': 'np.float32'}), '([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)\n', (2035, 2098), True, 'import numpy as np\n'), ((2109, 2170), 'numpy.array', 'np.array', 
(['[[5, 6, 9], [2, 7, 8], [2, 1, 9]]'], {'dtype': 'np.float32'}), '([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)\n', (2117, 2170), True, 'import numpy as np\n'), ((2274, 2315), 'helpers.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where'}), '(cases, F.where, ref_fn=np.where)\n', (2282, 2315), False, 'from helpers import opr_test\n'), ((2330, 2365), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'np.int32'}), '([1, 1, 1], dtype=np.int32)\n', (2338, 2365), True, 'import numpy as np\n'), ((2376, 2413), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (2384, 2413), True, 'import numpy as np\n'), ((2424, 2461), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (2432, 2461), True, 'import numpy as np\n'), ((2476, 2511), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.int32'}), '([0, 0, 0], dtype=np.int32)\n', (2484, 2511), True, 'import numpy as np\n'), ((2522, 2559), 'numpy.array', 'np.array', (['[1, 3, 2]'], {'dtype': 'np.float32'}), '([1, 3, 2], dtype=np.float32)\n', (2530, 2559), True, 'import numpy as np\n'), ((2570, 2607), 'numpy.array', 'np.array', (['[5, 6, 9]'], {'dtype': 'np.float32'}), '([5, 6, 9], dtype=np.float32)\n', (2578, 2607), True, 'import numpy as np\n'), ((2711, 2752), 'helpers.opr_test', 'opr_test', (['cases', 'F.where'], {'ref_fn': 'np.where'}), '(cases, F.where, ref_fn=np.where)\n', (2719, 2752), False, 'from helpers import opr_test\n'), ((3751, 3798), 'helpers.opr_test', 'opr_test', (['cases', 'F.matrix_mul'], {'ref_fn': 'np.matmul'}), '(cases, F.matrix_mul, ref_fn=np.matmul)\n', (3759, 3798), False, 'from helpers import opr_test\n'), ((4906, 4929), 'helpers.opr_test', 'opr_test', (['cases', 'F.sort'], {}), '(cases, F.sort)\n', (4914, 4929), False, 'from helpers import opr_test\n'), ((5174, 5215), 'helpers.opr_test', 'opr_test', (['cases', 'F.round'], {'ref_fn': 'np.round'}), '(cases, F.round, 
ref_fn=np.round)\n', (5182, 5215), False, 'from helpers import opr_test\n'), ((5714, 5768), 'helpers.opr_test', 'opr_test', (['cases', 'F.broadcast_to'], {'compare_fn': 'compare_fn'}), '(cases, F.broadcast_to, compare_fn=compare_fn)\n', (5722, 5768), False, 'from helpers import opr_test\n'), ((7048, 7057), 'megengine.Buffer', 'Buffer', (['v'], {}), '(v)\n', (7054, 7057), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7067, 7085), 'megengine.functional.add_update', 'F.add_update', (['b', '(1)'], {}), '(b, 1)\n', (7079, 7085), True, 'import megengine.functional as F\n'), ((7134, 7152), 'megengine.functional.add_update', 'F.add_update', (['b', '(1)'], {}), '(b, 1)\n', (7146, 7152), True, 'import megengine.functional as F\n'), ((7202, 7235), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.float32'}), '((2, 2), dtype=np.float32)\n', (7209, 7235), True, 'import numpy as np\n'), ((7263, 7272), 'megengine.tensor', 'tensor', (['x'], {}), '(x)\n', (7269, 7272), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7285, 7294), 'megengine.tensor', 'tensor', (['y'], {}), '(y)\n', (7291, 7294), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7518, 7527), 'megengine.Buffer', 'Buffer', (['b'], {}), '(b)\n', (7524, 7527), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7693, 7721), 'megengine.functional.add_update', 'F.add_update', (['y', 'z'], {'beta': '(0.1)'}), '(y, z, beta=0.1)\n', (7705, 7721), True, 'import megengine.functional as F\n'), ((7775, 7804), 'megengine.test.assertTensorClose', 'assertTensorClose', (['res', '(b + 1)'], {}), '(res, b + 1)\n', (7792, 7804), False, 'from megengine.test import assertTensorClose\n'), ((8520, 8565), 'helpers.opr_test', 'opr_test', (['cases', 'F.cross_entropy_with_softmax'], {}), '(cases, F.cross_entropy_with_softmax)\n', (8528, 8565), False, 'from helpers import opr_test\n'), ((9224, 
9256), 'helpers.opr_test', 'opr_test', (['cases', 'F.cross_entropy'], {}), '(cases, F.cross_entropy)\n', (9232, 9256), False, 'from helpers import opr_test\n'), ((9537, 9556), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9551, 9556), True, 'import numpy as np\n'), ((9716, 9752), 'numpy.array', 'np.array', (['[0.6361]'], {'dtype': 'np.float32'}), '([0.6361], dtype=np.float32)\n', (9724, 9752), True, 'import numpy as np\n'), ((9758, 9777), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (9772, 9777), True, 'import numpy as np\n'), ((9937, 9972), 'numpy.array', 'np.array', (['[0.675]'], {'dtype': 'np.float32'}), '([0.675], dtype=np.float32)\n', (9945, 9972), True, 'import numpy as np\n'), ((10111, 10173), 'helpers.opr_test', 'opr_test', (['cases', 'F.binary_cross_entropy'], {'compare_fn': 'compare_fn'}), '(cases, F.binary_cross_entropy, compare_fn=compare_fn)\n', (10119, 10173), False, 'from helpers import opr_test\n'), ((10293, 10319), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['inp_scale'], {}), '(inp_scale)\n', (10308, 10319), True, 'import megengine._internal as mgb\n'), ((10334, 10358), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['w_scale'], {}), '(w_scale)\n', (10349, 10358), True, 'import megengine._internal as mgb\n'), ((10373, 10410), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (10389, 10410), True, 'import megengine._internal as mgb\n'), ((10427, 10454), 'megengine._internal.dtype.qint8', 'mgb.dtype.qint8', (['outp_scale'], {}), '(outp_scale)\n', (10442, 10454), True, 'import megengine._internal as mgb\n'), ((3267, 3291), 'megengine.functional.concat', 'F.concat', (['[data1, data2]'], {}), '([data1, data2])\n', (3275, 3291), True, 'import megengine.functional as F\n'), ((4672, 4686), 'numpy.sort', 'np.sort', (['data1'], {}), '(data1)\n', (4679, 4686), True, 'import numpy as np\n'), ((4739, 4753), 'numpy.sort', 'np.sort', 
(['data2'], {}), '(data2)\n', (4746, 4753), True, 'import numpy as np\n'), ((7573, 7591), 'megengine.functional.add_update', 'F.add_update', (['y', 'x'], {}), '(y, x)\n', (7585, 7591), True, 'import megengine.functional as F\n'), ((10681, 10719), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(N, IC, IH, IW)'}), '(size=(N, IC, IH, IW))\n', (10697, 10719), True, 'import numpy as np\n'), ((10734, 10773), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(OC, IC, KW, KW)'}), '(size=(OC, IC, KW, KW))\n', (10750, 10773), True, 'import numpy as np\n'), ((10788, 10824), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, OC, 1, 1)'}), '(size=(1, OC, 1, 1))\n', (10804, 10824), True, 'import numpy as np\n'), ((10845, 10875), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp_dtype'], {}), '(inp_dtype)\n', (10864, 10875), True, 'import megengine._internal as mgb\n'), ((10894, 10922), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['w_dtype'], {}), '(w_dtype)\n', (10913, 10922), True, 'import megengine._internal as mgb\n'), ((10941, 10969), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['b_dtype'], {}), '(b_dtype)\n', (10960, 10969), True, 'import megengine._internal as mgb\n'), ((10986, 11042), 'megengine._internal.dtype.convert_to_qint8', 'mgb.dtype.convert_to_qint8', (['(inp_v * inp_scale)', 'inp_dtype'], {}), '(inp_v * inp_scale, inp_dtype)\n', (11012, 11042), True, 'import megengine._internal as mgb\n'), ((11056, 11106), 'megengine._internal.dtype.convert_to_qint8', 'mgb.dtype.convert_to_qint8', (['(w_v * w_scale)', 'w_dtype'], {}), '(w_v * w_scale, w_dtype)\n', (11082, 11106), True, 'import megengine._internal as mgb\n'), ((11120, 11171), 'megengine._internal.dtype.convert_to_qint32', 'mgb.dtype.convert_to_qint32', (['(b_v * b_scale)', 'b_dtype'], {}), '(b_v * b_scale, b_dtype)\n', (11147, 11171), True, 'import megengine._internal as mgb\n'), ((11192, 11221), 'megengine.tensor', 'tensor', 
(['inpv'], {'dtype': 'inp_dtype'}), '(inpv, dtype=inp_dtype)\n', (11198, 11221), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11239, 11267), 'megengine.Parameter', 'Parameter', (['wv'], {'dtype': 'w_dtype'}), '(wv, dtype=w_dtype)\n', (11248, 11267), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11286, 11314), 'megengine.Parameter', 'Parameter', (['bv'], {'dtype': 'b_dtype'}), '(bv, dtype=b_dtype)\n', (11295, 11314), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((11712, 11742), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'b_symbolic'}), '(symbolic=b_symbolic)\n', (11721, 11742), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((12029, 12059), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'b_symbolic'}), '(symbolic=b_symbolic)\n', (12038, 12059), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((12977, 12996), 'megengine.functional.flatten', 'F.flatten', (['expected'], {}), '(expected)\n', (12986, 12996), True, 'import megengine.functional as F\n'), ((13014, 13031), 'megengine.functional.flatten', 'F.flatten', (['result'], {}), '(result)\n', (13023, 13031), True, 'import megengine.functional as F\n'), ((13104, 13123), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (13121, 13123), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((720, 749), 'numpy.random.random', 'np.random.random', (['data0_shape'], {}), '(data0_shape)\n', (736, 749), True, 'import numpy as np\n'), ((781, 810), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (797, 810), True, 'import numpy as np\n'), ((3526, 3550), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3542, 3550), True, 'import numpy as np\n'), ((3581, 3605), 'numpy.random.random', 'np.random.random', 
(['shape2'], {}), '(shape2)\n', (3597, 3605), True, 'import numpy as np\n'), ((3636, 3660), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (3652, 3660), True, 'import numpy as np\n'), ((3960, 3984), 'numpy.random.random', 'np.random.random', (['shape1'], {}), '(shape1)\n', (3976, 3984), True, 'import numpy as np\n'), ((4015, 4039), 'numpy.random.random', 'np.random.random', (['shape2'], {}), '(shape2)\n', (4031, 4039), True, 'import numpy as np\n'), ((4070, 4094), 'numpy.random.random', 'np.random.random', (['shape3'], {}), '(shape3)\n', (4086, 4094), True, 'import numpy as np\n'), ((4547, 4576), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (4563, 4576), True, 'import numpy as np\n'), ((4608, 4637), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (4624, 4637), True, 'import numpy as np\n'), ((5010, 5039), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (5026, 5039), True, 'import numpy as np\n'), ((5071, 5100), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (5087, 5100), True, 'import numpy as np\n'), ((5316, 5346), 'numpy.random.random', 'np.random.random', (['input1_shape'], {}), '(input1_shape)\n', (5332, 5346), True, 'import numpy as np\n'), ((5440, 5470), 'numpy.random.random', 'np.random.random', (['input2_shape'], {}), '(input2_shape)\n', (5456, 5470), True, 'import numpy as np\n'), ((6997, 7020), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (7013, 7020), True, 'import numpy as np\n'), ((7335, 7346), 'megengine.tensor', 'tensor', (['(0.9)'], {}), '(0.9)\n', (7341, 7346), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((7466, 7490), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (7482, 7490), True, 'import numpy as np\n'), ((7957, 7993), 'numpy.array', 'np.array', (['[1, 0.5]'], {'dtype': 
'np.float32'}), '([1, 0.5], dtype=np.float32)\n', (7965, 7993), True, 'import numpy as np\n'), ((8028, 8057), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8036, 8057), True, 'import numpy as np\n'), ((8173, 8216), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.3]'], {'dtype': 'np.float32'}), '([0.3, 0.4, 0.3], dtype=np.float32)\n', (8181, 8216), True, 'import numpy as np\n'), ((8251, 8280), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8259, 8280), True, 'import numpy as np\n'), ((8705, 8743), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {'dtype': 'np.float32'}), '([0.5, 0.5], dtype=np.float32)\n', (8713, 8743), True, 'import numpy as np\n'), ((8778, 8807), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8786, 8807), True, 'import numpy as np\n'), ((8900, 8943), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.3]'], {'dtype': 'np.float32'}), '([0.3, 0.4, 0.3], dtype=np.float32)\n', (8908, 8943), True, 'import numpy as np\n'), ((8978, 9007), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.int32'}), '([1], dtype=np.int32)\n', (8986, 9007), True, 'import numpy as np\n'), ((9646, 9682), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label1_shape'}), '(size=label1_shape)\n', (9663, 9682), True, 'import numpy as np\n'), ((9867, 9903), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'label2_shape'}), '(size=label2_shape)\n', (9884, 9903), True, 'import numpy as np\n'), ((11794, 11870), 'megengine.functional.conv2d', 'F.conv2d', (['inp', 'w', '(b if has_bias else None)'], {'stride': '(SH, SW)', 'padding': '(PH, PW)'}), '(inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW))\n', (11802, 11870), True, 'import megengine.functional as F\n'), ((12335, 12456), 'megengine.functional.conv_bias_activation', 'F.conv_bias_activation', (['inp', 'w', 'b'], {'stride': '(SH, SW)', 'padding': '(PH, PW)', 'dtype': 'out_dtype', 
'nonlinear_mode': 'nonlinear_mode'}), '(inp, w, b, stride=(SH, SW), padding=(PH, PW), dtype=\n out_dtype, nonlinear_mode=nonlinear_mode)\n', (12357, 12456), True, 'import megengine.functional as F\n'), ((12608, 12627), 'megengine.is_cuda_available', 'is_cuda_available', ([], {}), '()\n', (12625, 12627), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((3405, 3427), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {}), '([x, y])\n', (3419, 3427), True, 'import numpy as np\n'), ((4688, 4705), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (4698, 4705), True, 'import numpy as np\n'), ((4755, 4772), 'numpy.argsort', 'np.argsort', (['data2'], {}), '(data2)\n', (4765, 4772), True, 'import numpy as np\n'), ((5962, 6009), 'numpy.linspace', 'np.linspace', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (5973, 6009), True, 'import numpy as np\n'), ((6188, 6235), 'numpy.linspace', 'np.linspace', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6199, 6235), True, 'import numpy as np\n'), ((6432, 6477), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6441, 6477), True, 'import numpy as np\n'), ((6656, 6701), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6665, 6701), True, 'import numpy as np\n'), ((6892, 6937), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {'dtype': 'np.float32'}), '(start, end, step, dtype=np.float32)\n', (6901, 6937), True, 'import numpy as np\n'), ((7599, 7615), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (7607, 7615), True, 'import numpy as np\n'), ((7652, 7668), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (7660, 7668), True, 'import numpy as np\n'), ((7735, 7750), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 
3))\n', (7742, 7750), True, 'import numpy as np\n'), ((8136, 8150), 'megengine.tensor', 'tensor', (['label1'], {}), '(label1)\n', (8142, 8150), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8359, 8373), 'megengine.tensor', 'tensor', (['label2'], {}), '(label2)\n', (8365, 8373), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8855, 8866), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (8861, 8866), True, 'import numpy as np\n'), ((9055, 9066), 'numpy.log', 'np.log', (['(0.4)'], {}), '(0.4)\n', (9061, 9066), True, 'import numpy as np\n'), ((9439, 9449), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (9445, 9449), True, 'import numpy as np\n'), ((9577, 9612), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data1_shape'}), '(size=data1_shape)\n', (9594, 9612), True, 'import numpy as np\n'), ((9798, 9833), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'data2_shape'}), '(size=data2_shape)\n', (9815, 9833), True, 'import numpy as np\n'), ((11966, 11975), 'megengine.functional.relu', 'F.relu', (['O'], {}), '(O)\n', (11972, 11975), True, 'import megengine.functional as F\n'), ((12148, 12164), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (12161, 12164), True, 'import numpy as np\n'), ((12303, 12315), 'megengine.functional.flatten', 'F.flatten', (['b'], {}), '(b)\n', (12312, 12315), True, 'import megengine.functional as F\n'), ((4421, 4452), 'numpy.matmul', 'np.matmul', (['x[i, ...]', 'y[i, ...]'], {}), '(x[i, ...], y[i, ...])\n', (4430, 4452), True, 'import numpy as np\n'), ((8120, 8133), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (8126, 8133), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((8343, 8356), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (8349, 8356), False, 'from megengine import Buffer, Parameter, is_cuda_available, jit, tensor\n'), ((2896, 2908), 'numpy.eye', 'np.eye', 
(['n', 'm'], {}), '(n, m)\n', (2902, 2908), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import json
import logging
import megengine as mge
import coloredlogs
import numpy as np
import megengine.functional as F
class Params():
    """Container for hyperparameters loaded from a json file.

    Example:
    ```
    params = Params(json_path)
    print(params.learning_rate)
    params.learning_rate = 0.5  # change the value of learning_rate in params
    ```
    """

    def __init__(self, json_path):
        # Load the json file and expose each key as an instance attribute.
        with open(json_path) as f:
            self.update(json.load(f))

    def save(self, json_path):
        """Write the current hyperparameters back to `json_path`."""
        with open(json_path, 'w') as f:
            json.dump(self.__dict__, f, indent=4)

    def update(self, dict):
        """Merge the given mapping into this instance's attributes."""
        self.__dict__.update(dict)

    @property
    def dict(self):
        """Dict-like access to the parameters: `params.dict['learning_rate']`."""
        return self.__dict__
class RunningAverage():
    """Maintains the running average of a quantity.

    Example:
    ```
    loss_avg = RunningAverage()
    loss_avg.update(2)
    loss_avg.update(4)
    loss_avg() = 3
    ```
    """

    def __init__(self):
        # Number of observations and their running sum.
        self.steps = 0
        self.total = 0

    def update(self, val):
        """Fold one more observation into the average."""
        self.steps += 1
        self.total += val

    def __call__(self):
        """Return the mean of all values seen so far."""
        return self.total / float(self.steps)
class AverageMeter():
    """Tracks the latest value, previous value, sum, count and mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear every statistic back to its initial state.
        self.val = 0
        self.val_previous = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, num):
        """Record `val` observed `num` times and refresh the running mean."""
        self.val_previous = self.val
        self.val = val
        self.count += num
        self.sum += val * num
        self.avg = self.sum / self.count
def loss_meter_manager_intial(loss_meter_names):
    """Create one fresh AverageMeter per entry of `loss_meter_names`.

    Args:
        loss_meter_names: iterable of meter names; only its length matters.

    Returns:
        list of newly constructed AverageMeter objects, one per name,
        in the same order as the names.
    """
    # The previous implementation exec()-ed "name = AverageMeter()" and then
    # exec()-ed an append for each name.  That relied on CPython-specific
    # behavior of exec/locals() inside a function and is fragile; building
    # the list directly is equivalent and portable.
    return [AverageMeter() for _ in loss_meter_names]
def tensor_mge(batch, check_on=True):
    """Convert a batch dict between numpy arrays and megengine Tensors, in place.

    Args:
        batch: dict whose values are converted in place.
        check_on: if True, wrap every numpy.ndarray value in mge.Tensor
            (other values are left untouched); if False, replace every
            value with ``value.numpy()``.

    Returns:
        The same dict object, mutated in place.
    """
    if not check_on:
        for key in batch:
            batch[key] = batch[key].numpy()
        return batch
    for key, value in batch.items():
        if isinstance(value, np.ndarray):
            batch[key] = mge.Tensor(value)
    return batch
def set_logger(log_path):
    """Configure the root logger to write to the terminal and to `log_path`.

    With this in place every message printed to the terminal is also kept
    in a permanent file (e.g. `model_dir/train.log`).

    Example:
    ```
    logging.info("Starting training...")
    ```

    Args:
        log_path: (string) where to log

    Returns:
        The configured root logger.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Colored output on the terminal.
    coloredlogs.install(level='INFO', logger=logger, fmt='%(asctime)s %(name)s %(message)s')
    # Plain timestamped formatting for the log file.
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
    logger.addHandler(file_handler)
    logger.info('Output and logs will be saved to {}'.format(log_path))
    return logger
def save_dict_to_json(d, json_path):
    """Saves dict of floats in json file

    Args:
        d: (dict) of float-castable values (np.float, int, float, etc.)
        json_path: (string) path to json file
    """
    # json can't serialize np.array / np.float / AverageMeter directly,
    # so cast every value to a plain float first (meters contribute their mean).
    serializable = {
        k: float(v.avg) if isinstance(v, AverageMeter) else float(v)
        for k, v in d.items()
    }
    with open(json_path, "w") as f:
        json.dump(serializable, f, indent=4)
def upsample2d_flow_as(inputs, target_as, mode="bilinear", if_rate=False):
    """Resize a flow map to the spatial size of `target_as`.

    Args:
        inputs: flow tensor; channel 0 is u (x-flow), channel 1 is v (y-flow).
        target_as: tensor whose trailing (H, W) defines the output size.
        mode: interpolation mode for F.vision.interpolate.
        if_rate: when True, rescale u/v by the resize ratio so flow
            magnitudes stay consistent with the new resolution.
    """
    _, _, out_h, out_w = target_as.shape
    resized = F.vision.interpolate(inputs, [out_h, out_w], mode=mode, align_corners=True)
    if if_rate:
        _, _, in_h, in_w = inputs.shape
        # Flow components are measured in pixels, so scale with the size change.
        resized[:, 0] *= out_w / in_w
        resized[:, 1] *= out_h / in_h
    return resized
def mesh_grid(B, H, W):
    """Build a batched coordinate grid of shape (B, 2, H, W).

    Channel 0 holds the x (column) index, channel 1 the y (row) index.
    """
    xs = F.tile(F.arange(0, W), (B, H, 1))                      # BHW
    ys = F.tile(F.arange(0, H), (B, W, 1)).transpose(0, 2, 1)  # BHW
    return F.stack([xs, ys], 1)  # B2HW
def flow_warp(x, flow12):
    """Warp the image batch `x` by the flow field `flow12` via remap sampling."""
    B, _, H, W = x.shape
    # Absolute sampling coordinates = base pixel grid + flow, laid out as NHWC.
    coords = mesh_grid(B, H, W).astype(x) + flow12
    coords = F.transpose(coords, (0, 2, 3, 1))
    return F.vision.remap(x, coords)
def euclidean(t):
    """Per-position L2 norm over axis 1, keeping the reduced axis."""
    squared = t ** 2
    return F.sqrt(F.sum(squared, axis=(1, ), keepdims=True))
def flow_error_avg(pred_flow, gt_flow):
    """Mean end-point error between predicted and ground-truth flow fields."""
    _, _, H, W = gt_flow.shape
    _, _, h, w = pred_flow.shape
    # Both flows must share the same spatial resolution.
    assert (H == h) and (W == w), "inps shape is not the same: {} - {}".format((H, W), (h, w))
    return F.mean(euclidean(pred_flow - gt_flow))
def weight_parameters(module):
    """Collect every named parameter whose name contains "weight"."""
    selected = []
    for name, param in module.named_parameters():
        if "weight" in name:
            selected.append(param)
    return selected
def bias_parameters(module):
    """Collect every named parameter whose name contains "bias"."""
    selected = []
    for name, param in module.named_parameters():
        if "bias" in name:
            selected.append(param)
    return selected
|
[
"megengine.functional.arange",
"megengine.functional.tile",
"megengine.Tensor",
"megengine.functional.stack",
"megengine.functional.vision.remap",
"megengine.functional.mean",
"megengine.functional.transpose",
"megengine.functional.vision.interpolate",
"megengine.functional.sum"
] |
[((3019, 3038), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3036, 3038), False, 'import logging\n'), ((3078, 3171), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'logger': 'logger', 'fmt': '"""%(asctime)s %(name)s %(message)s"""'}), "(level='INFO', logger=logger, fmt=\n '%(asctime)s %(name)s %(message)s')\n", (3097, 3171), False, 'import coloredlogs\n'), ((3186, 3215), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (3205, 3215), False, 'import logging\n'), ((3236, 3282), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(message)s"""'], {}), "('%(asctime)s - %(message)s')\n", (3253, 3282), False, 'import logging\n'), ((4164, 4231), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['inputs', '[h, w]'], {'mode': 'mode', 'align_corners': '(True)'}), '(inputs, [h, w], mode=mode, align_corners=True)\n', (4184, 4231), True, 'import megengine.functional as F\n'), ((4462, 4476), 'megengine.functional.arange', 'F.arange', (['(0)', 'W'], {}), '(0, W)\n', (4470, 4476), True, 'import megengine.functional as F\n'), ((4490, 4515), 'megengine.functional.tile', 'F.tile', (['x_base', '(B, H, 1)'], {}), '(x_base, (B, H, 1))\n', (4496, 4515), True, 'import megengine.functional as F\n'), ((4530, 4544), 'megengine.functional.arange', 'F.arange', (['(0)', 'H'], {}), '(0, H)\n', (4538, 4544), True, 'import megengine.functional as F\n'), ((4627, 4655), 'megengine.functional.stack', 'F.stack', (['[x_base, y_base]', '(1)'], {}), '([x_base, y_base], 1)\n', (4634, 4655), True, 'import megengine.functional as F\n'), ((4844, 4880), 'megengine.functional.transpose', 'F.transpose', (['grid_warp', '(0, 2, 3, 1)'], {}), '(grid_warp, (0, 2, 3, 1))\n', (4855, 4880), True, 'import megengine.functional as F\n'), ((4898, 4926), 'megengine.functional.vision.remap', 'F.vision.remap', (['x', 'grid_warp'], {}), '(x, grid_warp)\n', (4912, 4926), True, 'import megengine.functional as F\n'), ((5283, 
5295), 'megengine.functional.mean', 'F.mean', (['diff'], {}), '(diff)\n', (5289, 5295), True, 'import megengine.functional as F\n'), ((4010, 4043), 'json.dump', 'json.dump', (['save_dict', 'f'], {'indent': '(4)'}), '(save_dict, f, indent=4)\n', (4019, 4043), False, 'import json\n'), ((4986, 5025), 'megengine.functional.sum', 'F.sum', (['(t ** 2)'], {'axis': '(1,)', 'keepdims': '(True)'}), '(t ** 2, axis=(1,), keepdims=True)\n', (4991, 5025), True, 'import megengine.functional as F\n'), ((846, 858), 'json.load', 'json.load', (['f'], {}), '(f)\n', (855, 858), False, 'import json\n'), ((975, 1012), 'json.dump', 'json.dump', (['self.__dict__', 'f'], {'indent': '(4)'}), '(self.__dict__, f, indent=4)\n', (984, 1012), False, 'import json\n'), ((4565, 4590), 'megengine.functional.tile', 'F.tile', (['y_base', '(B, W, 1)'], {}), '(y_base, (B, W, 1))\n', (4571, 4590), True, 'import megengine.functional as F\n'), ((2512, 2525), 'megengine.Tensor', 'mge.Tensor', (['v'], {}), '(v)\n', (2522, 2525), True, 'import megengine as mge\n')]
|
import functools
import numpy as np
import pytest
import megengine
from megengine.autodiff.grad_manager import GradManager
from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt
from megengine.core.tensor.utils import subgraph_fn
from megengine.device import CompNode, get_default_device
from megengine.jit import trace
# Allclose comparison with tolerances loose enough for float32 kernel output.
_assert_allclose = functools.partial(np.testing.assert_allclose, atol=5e-6, rtol=5e-6)
@functools.lru_cache(maxsize=None)
def _get_batch_norm_fn(dtype, device, channels, ndim, interpret, gopt_level):
    """Build (and cache) a 4-input batch-norm subgraph function.

    Inputs of the returned function are (input, eps, weight, bias).
    `interpret=True` runs the subgraph op-by-op; `interpret=False` uses the
    compiled subgraph, optionally with graph optimization `gopt_level`.
    """
    @subgraph_fn(
        "BatchNormNd",
        dtype=dtype,
        device=device,
        nr_inputs=4,
        interpret=interpret,
        gopt_level=gopt_level,
    )
    def batch_norm_nd(inputs, f, c):
        input, eps, weight, bias = inputs[0:4]
        # Shape (1, C, 1, ..., 1): reduce over every axis except channel.
        reduce_shape = c(
            (1, channels) + (1,) * (ndim - 2), dtype="int32", device=device
        )
        input_shape = f(GetVarShape(), input)
        # Elements reduced per channel = numel(input) / numel(reduce_shape).
        input_elems = f(Reduce(mode="product", axis=0), input_shape)
        reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
        reduce_size = f("//", input_elems, reduce_elems)
        reduce_size = f(TypeCvt(dtype=dtype), reduce_size)
        # Per-channel sums of x and x^2 yield mean and (biased) variance.
        channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
        channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
        channel_mean = f("/", channel_x1s, reduce_size)
        channel_var = f(
            "-", f("/", channel_x2s, reduce_size), f("*", channel_mean, channel_mean),
        )
        # 1 / sqrt(var + eps), folded together with the affine weight.
        invsqrt_channel_var = f("**", f("+", channel_var, eps), c(-0.5))
        inv_var_wt = f("*", invsqrt_channel_var, weight)
        neg_channel_mean = f("-", channel_mean)
        # out = (input - mean) * inv_var_wt + bias, expressed as two fma3 ops.
        outvar = f(
            "fma3", input, inv_var_wt, f("fma3", neg_channel_mean, inv_var_wt, bias),
        )
        return (outvar,), (True,)
    return batch_norm_nd
@pytest.mark.parametrize("device", [get_default_device(), "cpux"])
@pytest.mark.parametrize("batch_size", [1, 8])
@pytest.mark.parametrize("channels", [3])
@pytest.mark.parametrize(
"use_trace, symbolic", [(False, None), (True, False), (True, True)]
)
@pytest.mark.parametrize("gopt_level", [None, 1, 2])
@pytest.mark.parametrize("dtype", ["float32"])
def test_subgraph(device, batch_size, channels, use_trace, symbolic, gopt_level, dtype):
device = CompNode(device)
def subgraph_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=False, gopt_level=gopt_level
)
out, *_ = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
def primitive_batch_norm(inp, weight, bias, eps, diff):
inp = inp.detach()
with GradManager().attach(inp) as gm:
batch_norm_fn = _get_batch_norm_fn(
dtype, device, channels, ndim, interpret=True, gopt_level=gopt_level
)
(out,) = batch_norm_fn(inp, eps, weight, bias)
gm.backward(out * 1e3 + 1e3, diff)
return out, inp.grad
if use_trace:
subgraph_batch_norm = trace(symbolic=symbolic)(subgraph_batch_norm)
primitive_batch_norm = trace(symbolic=symbolic)(primitive_batch_norm)
def rand_tensor(shape, dtype=dtype, device=device):
return megengine.tensor(np.random.random(shape), dtype=dtype, device=device)
# test shape change
for image_shape in [(223, 223), (10, 20)]:
ndim = len(image_shape) + 2
input_shape = (batch_size, channels) + image_shape
param_shape = (1, channels) + (1,) * len(image_shape)
inp = rand_tensor(input_shape) * 1e3 + 1e3
weight = rand_tensor(param_shape)
bias = rand_tensor(param_shape)
eps = megengine.tensor(1e-5, dtype=dtype, device=device)
diff = rand_tensor(input_shape)
out1, grad1 = subgraph_batch_norm(inp, weight, bias, eps, diff)
out2, grad2 = primitive_batch_norm(inp, weight, bias, eps, diff)
_assert_allclose(out1.numpy(), out2.numpy())
_assert_allclose(grad1.numpy(), grad2.numpy())
|
[
"megengine.core.ops.builtin.GetVarShape",
"megengine.core.tensor.utils.subgraph_fn",
"megengine.device.CompNode",
"megengine.jit.trace",
"megengine.device.get_default_device",
"megengine.core.ops.builtin.Reduce",
"megengine.tensor",
"megengine.core.ops.builtin.TypeCvt",
"megengine.autodiff.grad_manager.GradManager"
] |
[((355, 424), 'functools.partial', 'functools.partial', (['np.testing.assert_allclose'], {'atol': '(5e-06)', 'rtol': '(5e-06)'}), '(np.testing.assert_allclose, atol=5e-06, rtol=5e-06)\n', (372, 424), False, 'import functools\n'), ((426, 459), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': 'None'}), '(maxsize=None)\n', (445, 459), False, 'import functools\n'), ((1944, 1989), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_size"""', '[1, 8]'], {}), "('batch_size', [1, 8])\n", (1967, 1989), False, 'import pytest\n'), ((1991, 2031), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels"""', '[3]'], {}), "('channels', [3])\n", (2014, 2031), False, 'import pytest\n'), ((2033, 2130), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_trace, symbolic"""', '[(False, None), (True, False), (True, True)]'], {}), "('use_trace, symbolic', [(False, None), (True, False\n ), (True, True)])\n", (2056, 2130), False, 'import pytest\n'), ((2133, 2184), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""gopt_level"""', '[None, 1, 2]'], {}), "('gopt_level', [None, 1, 2])\n", (2156, 2184), False, 'import pytest\n'), ((2186, 2231), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['float32']"], {}), "('dtype', ['float32'])\n", (2209, 2231), False, 'import pytest\n'), ((543, 658), 'megengine.core.tensor.utils.subgraph_fn', 'subgraph_fn', (['"""BatchNormNd"""'], {'dtype': 'dtype', 'device': 'device', 'nr_inputs': '(4)', 'interpret': 'interpret', 'gopt_level': 'gopt_level'}), "('BatchNormNd', dtype=dtype, device=device, nr_inputs=4,\n interpret=interpret, gopt_level=gopt_level)\n", (554, 658), False, 'from megengine.core.tensor.utils import subgraph_fn\n'), ((2334, 2350), 'megengine.device.CompNode', 'CompNode', (['device'], {}), '(device)\n', (2342, 2350), False, 'from megengine.device import CompNode, get_default_device\n'), ((3884, 3935), 'megengine.tensor', 'megengine.tensor', (['(1e-05)'], 
{'dtype': 'dtype', 'device': 'device'}), '(1e-05, dtype=dtype, device=device)\n', (3900, 3935), False, 'import megengine\n'), ((1912, 1932), 'megengine.device.get_default_device', 'get_default_device', ([], {}), '()\n', (1930, 1932), False, 'from megengine.device import CompNode, get_default_device\n'), ((930, 943), 'megengine.core.ops.builtin.GetVarShape', 'GetVarShape', ([], {}), '()\n', (941, 943), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((976, 1006), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (982, 1006), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1046, 1076), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""product"""', 'axis': '(0)'}), "(mode='product', axis=0)\n", (1052, 1076), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1173, 1193), 'megengine.core.ops.builtin.TypeCvt', 'TypeCvt', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (1180, 1193), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1232, 1250), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum"""'}), "(mode='sum')\n", (1238, 1250), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((1297, 1319), 'megengine.core.ops.builtin.Reduce', 'Reduce', ([], {'mode': '"""sum_sqr"""'}), "(mode='sum_sqr')\n", (1303, 1319), False, 'from megengine.core.ops.builtin import GetVarShape, Reduce, TypeCvt\n'), ((3241, 3265), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3246, 3265), False, 'from megengine.jit import trace\n'), ((3318, 3342), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (3323, 3342), False, 'from megengine.jit import trace\n'), ((3454, 3477), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3470, 
3477), True, 'import numpy as np\n'), ((2451, 2464), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2462, 2464), False, 'from megengine.autodiff.grad_manager import GradManager\n'), ((2873, 2886), 'megengine.autodiff.grad_manager.GradManager', 'GradManager', ([], {}), '()\n', (2884, 2886), False, 'from megengine.autodiff.grad_manager import GradManager\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
class AdaptiveAvgPool2d(M.Module):
    """Global average pooling to a 1x1 spatial map via two chained means."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        pooled_rows = F.mean(x, axis=-2, keepdims=True)
        return F.mean(pooled_rows, axis=-1, keepdims=True)
class AdaptiveMaxPool2d(M.Module):
    """Global max pooling to a 1x1 spatial map via two chained maxes."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        pooled_rows = F.max(x, axis=-2, keepdims=True)
        return F.max(pooled_rows, axis=-1, keepdims=True)
class ChannelAttention(M.Module):
    """Channel attention from CBAM: a shared MLP applied to both the
    average-pooled and max-pooled feature maps, summed and gated by sigmoid."""

    def __init__(self, in_planes, ratio=16):
        super().__init__()
        self.avg_pool = AdaptiveAvgPool2d()
        self.max_pool = AdaptiveMaxPool2d()
        # Bottleneck MLP implemented with 1x1 convolutions (no bias).
        self.sharedMLP = M.Sequential(
            M.Conv2d(in_planes, in_planes // ratio, 1, bias=False),
            M.ReLU(),
            M.Conv2d(in_planes // ratio, in_planes, 1, bias=False),
        )
        self.sigmoid = M.Sigmoid()

    def forward(self, x):
        branch_avg = self.sharedMLP(self.avg_pool(x))
        branch_max = self.sharedMLP(self.max_pool(x))
        return self.sigmoid(branch_avg + branch_max)
class SpatialAttention(M.Module):
    """Spatial attention from CBAM: a conv over the stacked channel-wise
    mean and max maps, gated by sigmoid."""

    def __init__(self, kernel_size=3):
        super().__init__()
        self.conv = M.Conv2d(2, 1, kernel_size, padding=1, bias=False)
        self.sigmoid = M.Sigmoid()
        # Functional handles kept as attributes, as in the original design.
        self.concat = F.concat
        self.mean = F.mean
        self.max = F.max

    def forward(self, x):
        # Stack per-pixel channel statistics into a 2-channel map.
        stats = self.concat([self.mean(x, 1, True), self.max(x, 1, True)], 1)
        return self.sigmoid(self.conv(stats))
class CBAM(M.Module):
    """Convolutional Block Attention Module: channel attention followed by
    spatial attention, each applied multiplicatively."""

    def __init__(self, planes):
        super().__init__()
        self.ca = ChannelAttention(planes)
        self.sa = SpatialAttention()

    def forward(self, x):
        attended = self.ca(x) * x
        return self.sa(attended) * attended
if __name__ == "__main__":
data = mge.tensor(np.random.random((1, 16, 10, 10)).astype(np.float32))
model = CBAM(16)
opt = optim.SGD(model.parameters(), lr=0.1)
for i in range(5):
opt.zero_grad()
loss = model(data).mean()
opt.backward(loss)
opt.step()
print("loss = {:.3f}".format(loss.numpy()[0]))
|
[
"megengine.module.Sigmoid",
"megengine.module.ReLU",
"megengine.functional.mean",
"megengine.functional.max",
"megengine.module.Conv2d"
] |
[((933, 944), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (942, 944), True, 'import megengine.module as M\n'), ((1239, 1289), 'megengine.module.Conv2d', 'M.Conv2d', (['(2)', '(1)', 'kernel_size'], {'padding': '(1)', 'bias': '(False)'}), '(2, 1, kernel_size, padding=1, bias=False)\n', (1247, 1289), True, 'import megengine.module as M\n'), ((1311, 1322), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (1320, 1322), True, 'import megengine.module as M\n'), ((278, 311), 'megengine.functional.mean', 'F.mean', (['x'], {'axis': '(-2)', 'keepdims': '(True)'}), '(x, axis=-2, keepdims=True)\n', (284, 311), True, 'import megengine.functional as F\n'), ((472, 504), 'megengine.functional.max', 'F.max', (['x'], {'axis': '(-2)', 'keepdims': '(True)'}), '(x, axis=-2, keepdims=True)\n', (477, 504), True, 'import megengine.functional as F\n'), ((776, 830), 'megengine.module.Conv2d', 'M.Conv2d', (['in_planes', '(in_planes // ratio)', '(1)'], {'bias': '(False)'}), '(in_planes, in_planes // ratio, 1, bias=False)\n', (784, 830), True, 'import megengine.module as M\n'), ((832, 840), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (838, 840), True, 'import megengine.module as M\n'), ((854, 908), 'megengine.module.Conv2d', 'M.Conv2d', (['(in_planes // ratio)', 'in_planes', '(1)'], {'bias': '(False)'}), '(in_planes // ratio, in_planes, 1, bias=False)\n', (862, 908), True, 'import megengine.module as M\n'), ((1926, 1959), 'numpy.random.random', 'np.random.random', (['(1, 16, 10, 10)'], {}), '((1, 16, 10, 10))\n', (1942, 1959), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from typing import Callable, Union
import megengine as mge
import megengine.functional as F
import megengine.module as M
from .activations import activation
__all__ = ["conv2d", "norm2d", "pool2d", "gap2d", "linear", "SE", "DropPath"]
def conv2d(
    w_in: int,
    w_out: int,
    k: int,
    *,
    stride: int = 1,
    dilation: int = 1,
    groups: int = 1,
    bias: bool = False,
) -> M.Conv2d:
    """Helper for building a conv2d layer.

    Padding is derived from kernel size and dilation so spatial size is
    preserved at stride 1.

    Args:
        w_in: input width.
        w_out: output width.
        k: kernel size.
        stride: stride. Default: ``1``
        dilation: dilation. Default: ``1``
        groups: groups. Default: ``1``
        bias: enable bias or not. Default: ``False``

    Returns:
        A conv2d module.
    """
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    pad = (k - 1) * dilation // 2
    return M.Conv2d(
        w_in,
        w_out,
        k,
        stride=stride,
        padding=pad,
        dilation=dilation,
        groups=groups,
        bias=bias,
    )
def norm2d(name: Union[str, Callable], w_in: int, **kwargs) -> M.Module:
    """Helper for building a norm2d layer.

    Args:
        name: normalization name, supports ``None``, ``"BN"``, ``"GN"``,
            ``"IN"``, ``"LN"`` and ``"SyncBN"``; a callable is invoked directly.
        w_in: input width.

    Returns:
        A norm2d module.
    """
    if name is None:
        return M.Identity()
    if callable(name):
        return name(w_in, **kwargs)
    if isinstance(name, str):
        norm_funcs = {
            "BN": M.BatchNorm2d,
            "GN": M.GroupNorm,
            "IN": M.InstanceNorm,
            "LN": M.LayerNorm,
            "SyncBN": M.SyncBatchNorm,
        }
        norm_cls = norm_funcs.get(name)
        if norm_cls is not None:
            return norm_cls(w_in, **kwargs)
    raise ValueError(f"Norm name '{name}' not supported")
def pool2d(k: int, *, stride: int = 1, name: str = "max") -> M.Module:
    """Helper for building a pool2d layer.

    Args:
        k: kernel size.
        stride: stride. Default: ``1``
        name: pooling name, supports ``"avg"`` and ``"max"``.

    Returns:
        A pool2d module.
    """
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    pool_funcs = {
        "avg": M.AvgPool2d,
        "max": M.MaxPool2d,
    }
    if name in pool_funcs:
        return pool_funcs[name](k, stride=stride, padding=(k - 1) // 2)
    raise ValueError(f"Pool name '{name}' not supported")
def gap2d(shape=1) -> M.AdaptiveAvgPool2d:
    """Helper for building a global average pooling (gap2d) layer.

    Args:
        shape: output spatial shape. Default: ``1``

    Returns:
        A gap2d module.
    """
    return M.AdaptiveAvgPool2d(shape)
def linear(w_in: int, w_out: int, *, bias: bool = False) -> M.Linear:
    """Helper for building a fully-connected (linear) layer.

    Args:
        w_in: input width.
        w_out: output width.
        bias: enable bias or not. Default: ``False``

    Returns:
        A linear module.
    """
    return M.Linear(w_in, w_out, bias=bias)
class SE(M.Module):
    """Squeeze-and-Excitation (SE) block: AvgPool, FC, Act, FC, Sigmoid.

    Args:
        w_in: input width.
        w_se: se (bottleneck) width.
        act_name: activation name.
        approx_sigmoid: use the approximated ("hsigmoid") gate instead of Sigmoid.

    Attributes:
        avg_pool: gap2d layer.
        f_ex: sequential combining conv2d -> act -> conv2d -> sigmoid.
    """

    def __init__(self, w_in: int, w_se: int, act_name: str, approx_sigmoid: bool = False):
        super().__init__()
        self.avg_pool = gap2d()
        # NOTE: submodule construction order kept identical to the original.
        self.f_ex = M.Sequential(
            conv2d(w_in, w_se, 1, bias=True),
            activation(act_name),
            conv2d(w_se, w_in, 1, bias=True),
            activation("hsigmoid") if approx_sigmoid else M.Sigmoid(),
        )

    def forward(self, x: mge.Tensor) -> mge.Tensor:
        # Recalibrate channels with the squeeze-excite gate.
        scale = self.f_ex(self.avg_pool(x))
        return x * scale
class DropPath(M.Dropout):
    """DropPath (stochastic depth) block.

    Args:
        drop_prob: the probability to drop (set to zero) each path.
    """

    def forward(self, x: mge.Tensor):
        # Identity at eval time or when nothing would be dropped.
        if not self.training or self.drop_prob == 0.0:
            return x
        # One draw per sample, broadcast over all remaining dims;
        # F.dropout also rescales the surviving paths.
        mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        keep_mask = F.dropout(F.ones(mask_shape), self.drop_prob, training=self.training)
        return x * keep_mask
|
[
"megengine.module.AdaptiveAvgPool2d",
"megengine.module.Identity",
"megengine.module.Sigmoid",
"megengine.module.Linear",
"megengine.functional.dropout",
"megengine.functional.ones",
"megengine.module.Conv2d"
] |
[((1058, 1133), 'megengine.module.Conv2d', 'M.Conv2d', (['w_in', 'w_out', 'k'], {'stride': 's', 'padding': 'p', 'dilation': 'd', 'groups': 'g', 'bias': 'b'}), '(w_in, w_out, k, stride=s, padding=p, dilation=d, groups=g, bias=b)\n', (1066, 1133), True, 'import megengine.module as M\n'), ((2780, 2806), 'megengine.module.AdaptiveAvgPool2d', 'M.AdaptiveAvgPool2d', (['shape'], {}), '(shape)\n', (2799, 2806), True, 'import megengine.module as M\n'), ((3100, 3132), 'megengine.module.Linear', 'M.Linear', (['w_in', 'w_out'], {'bias': 'bias'}), '(w_in, w_out, bias=bias)\n', (3108, 3132), True, 'import megengine.module as M\n'), ((1500, 1512), 'megengine.module.Identity', 'M.Identity', ([], {}), '()\n', (1510, 1512), True, 'import megengine.module as M\n'), ((4325, 4338), 'megengine.functional.ones', 'F.ones', (['shape'], {}), '(shape)\n', (4331, 4338), True, 'import megengine.functional as F\n'), ((4354, 4409), 'megengine.functional.dropout', 'F.dropout', (['mask', 'self.drop_prob'], {'training': 'self.training'}), '(mask, self.drop_prob, training=self.training)\n', (4363, 4409), True, 'import megengine.functional as F\n'), ((3881, 3892), 'megengine.module.Sigmoid', 'M.Sigmoid', ([], {}), '()\n', (3890, 3892), True, 'import megengine.module as M\n')]
|
#! /usr/bin/env python3
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import math
import os
from megengine.utils.module_stats import sizeof_fmt
from megengine.utils.tensorboard import SummaryWriterExtend
def load_single_graph(fpath):
    """Load a dumped computing-graph JSON file and normalize it in place.

    Operator/var ids are converted from strings to ints, each var gets an
    "owner_opr" field naming the operator id that produces it, and every
    var gets a human-readable "shape" string.
    """
    with open(fpath) as fin:
        graph = json.load(fin)

    # JSON object keys are always strings; downstream code wants int ids.
    for section in ("operator", "var"):
        graph[section] = {int(k): v for k, v in graph[section].items()}

    variables = graph["var"]
    for opr_id, opr in graph["operator"].items():
        opr["input"] = [int(v) for v in opr["input"]]
        opr["output"] = [int(v) for v in opr["output"]]
        # Record, on each output var, which operator produces it.
        for out_id in opr["output"]:
            variables[out_id]["owner_opr"] = opr_id

    for var in variables.values():
        mem_plan = var.get("mem_plan", None)
        if mem_plan:
            dims = ",".join(str(d) for d in mem_plan["layout"]["shape"])
            var["shape"] = "{" + dims + "}"
        else:
            var["shape"] = "<?>"
    return graph
def comp_graph_plotter(input, writer):
    """Register every non-constant operator of a dumped graph with *writer*.

    ImmutableTensor oprs are treated as constants and hidden: they are
    neither added as nodes nor listed as inputs of other nodes.
    """
    graph = load_single_graph(input)
    oprs = graph["operator"]
    variables = graph["var"]
    for opr_id in oprs:
        opr = oprs[opr_id]
        if opr["type"] == "ImmutableTensor":
            continue
        producer_names = []
        for var_id in opr["input"]:
            # Each input var knows which opr produced it.
            producer = oprs[variables[var_id]["owner_opr"]]
            if producer["type"] == "ImmutableTensor":
                continue
            producer_names.append(producer["name"])
        writer.add_node_raw(opr["name"], opr["type"], producer_names)
    writer.add_graph_by_node_raw_list()
def load_mem_info(fpath):
    """Load a StaticMemoryInfo JSON dump and derive per-opr memory usage.

    Adds "size" to every opr (total static memory live while it runs),
    "size" and "owner_opr" to every chunk, and global "peak_memory" /
    "weight_memory" totals to the returned dict.
    """
    with open(fpath) as fin:
        info = json.load(fin)

    oprs = info["opr"]
    for opr in oprs.values():
        opr["size"] = 0
    for chunk in info["chunk"].values():
        chunk["size"] = int(chunk["logic_addr_end"]) - int(chunk["logic_addr_begin"])

    info["peak_memory"] = 0
    info["weight_memory"] = 0
    for chunk in info["chunk"].values():
        kind = chunk["type"]
        if kind == "static_mem":
            # The opr active when the chunk is allocated owns it.
            chunk["owner_opr"] = oprs[chunk["time_begin"]]["name"]
            begin = int(chunk["time_begin"])
            end = int(chunk["time_end"])
            # A chunk overwriting another only becomes live one step later.
            if chunk["overwrite_dest_id"] != "-1":
                begin += 1
            info["peak_memory"] = max(info["peak_memory"], int(chunk["logic_addr_end"]))
            # Charge the chunk to every opr alive in [begin, end).
            for t in range(begin, end):
                oprs[str(t)]["size"] += chunk["size"]
        elif kind == "weight_mem":
            info["weight_memory"] += int(chunk["logic_addr_end"]) - int(
                chunk["logic_addr_begin"]
            )
    return info
def peak_mem_regist(input, writer):
    """Register peak-memory statistics from a StaticMemoryInfo dump with *writer*.

    Finds the opr(s) whose live static memory reaches the global maximum,
    then for each distinct peak writes summary text, a per-chunk breakdown
    sorted by size, and annotates the owning graph nodes.
    """
    jmem = load_mem_info(input)
    # Overall totals: human-readable size plus the exact byte count.
    writer.add_text(
        "PEAK_MEMORY_SIZE",
        [sizeof_fmt(jmem["peak_memory"]) + "(" + str(jmem["peak_memory"]) + " B)"],
    )
    writer.add_text(
        "WEIGHT_MEMORY_SIZE",
        [sizeof_fmt(jmem["weight_memory"]) + "(" + str(jmem["weight_memory"]) + " B)"],
    )
    all_oprs = jmem["opr"]
    all_chunks = jmem["chunk"]
    max_size = 0
    max_size_oprs = []
    # get oprs that reach the max memory
    for oid, i in all_oprs.items():
        if i["size"] == max_size:
            max_size_oprs.append(int(i["id"]))
        elif i["size"] > max_size:
            # New maximum: discard previous candidates and start over.
            max_size = i["size"]
            max_size_oprs.clear()
            max_size_oprs.append(int(i["id"]))
    # get component of chunks
    max_size_oprs.sort()
    opr2chunks = []
    num = len(max_size_oprs)
    for i in range(num):
        opr2chunks.append([])
    # For every static chunk, record it under each peak opr whose lifetime
    # [life_begin, life_end) covers that opr's id (ids double as time steps;
    # max_size_oprs is sorted, so ranges outside [first, last] can be skipped).
    for oid, i in all_chunks.items():
        if i["type"] == "static_mem":
            life_begin = int(i["time_begin"])
            life_end = int(i["time_end"])
            if i["overwrite_dest_id"] != "-1":
                life_begin = life_begin + 1
            if max_size_oprs[0] >= life_end or max_size_oprs[-1] < life_begin:
                continue
            for j in range(num):
                if max_size_oprs[j] >= life_end:
                    break
                elif max_size_oprs[j] >= life_begin:
                    opr2chunks[j].append(i["id"])
    peak_num = 0
    for i in range(num):
        suffix_1 = "PEAK" + str(peak_num)
        # Skip a peak opr whose chunk set repeats the previous one.
        # NOTE(review): `i - 1 > 0` never deduplicates index 1 against index 0;
        # `i > 0` looks intended — confirm before changing.
        if i - 1 > 0 and opr2chunks[i - 1] == opr2chunks[i]:
            continue
        max_num = 0
        # Largest chunks first so the report lists the dominant consumers.
        opr2chunks[i] = sorted(
            opr2chunks[i],
            key=lambda chunk_id: all_chunks[chunk_id]["size"],
            reverse=True,
        )
        writer.add_text(
            suffix_1 + "/" + "<SUMMARY_INFO>",
            ["reached_max_opr_name: " + all_oprs[str(max_size_oprs[i])]["name"]],
            0,
        )
        writer.add_text(
            suffix_1 + "/" + "<SUMMARY_INFO>",
            ["max_used_size: " + sizeof_fmt(max_size)],
            1,
        )
        for j in opr2chunks[i]:
            suffix_2 = "MAX" + str(max_num)
            j_size = sizeof_fmt(all_chunks[j]["size"])
            j_percent = round(all_chunks[j]["size"] / max_size * 100, 3)
            writer.add_text(
                suffix_1 + "/" + suffix_2 + "_OPR",
                ["percent: " + str(j_percent) + "%"],
                0,
            )
            writer.add_text(
                suffix_1 + "/" + suffix_2 + "_OPR", ["memory_size: " + j_size], 1,
            )
            writer.add_text(
                suffix_1 + "/" + suffix_2 + "_OPR",
                ["owner_opr: " + all_chunks[j]["owner_opr"]],
                2,
            )
            # Annotate the graph node itself so the breakdown shows up in the
            # graph view as well as the text panel.
            writer.add_node_raw_attributes(
                all_chunks[j]["owner_opr"],
                {
                    "memory_" + all_chunks[j]["id"]: j_size,
                    "memory_percent": str(j_percent) + "%",
                    "summary_memory_" + str(peak_num): sizeof_fmt(max_size),
                },
            )
            writer.add_node_raw_name_suffix(
                all_chunks[j]["owner_opr"], "_" + suffix_1 + "_" + suffix_2
            )
            max_num += 1
        peak_num += 1
    writer.add_graph_by_node_raw_list()
def convert(args):
    """Walk args.input and convert each recognized json file to tensorboard logs.

    The output directory tree mirrors the input tree; graph files are
    handled before static-memory files for each directory.
    """
    handlers = {
        "graph.json": comp_graph_plotter,
        "StaticMemoryInfo.json": peak_mem_regist,
    }
    for path, _dirs, files in os.walk(args.input):
        writer = SummaryWriterExtend(path.replace(args.input, args.output))
        for fname, handler in handlers.items():
            if fname in files:
                handler(os.path.join(path, fname), writer)
def main():
    """Convert json dumped by ``VisableDataSet`` into python tensorboard logs.

    ``get_static_memory_alloc_info()`` dumps a directory that this script
    can convert.

    Examples:

    .. code-block:: shell

       graph_info_analyze.py -i <input_dir_name> -o <output_dir_name>
       tensorboard --logdir <output_dir_name>
    """
    parser = argparse.ArgumentParser(
        "convert json dumped by c to logs which can be read by python tensorboard",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-i", "--input", required=True, help="input dirctor name(c tensorboard info)"
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="output dirctor name(python tensorboard info)",
    )
    convert(parser.parse_args())


if __name__ == "__main__":
    main()
|
[
"megengine.utils.module_stats.sizeof_fmt",
"megengine.utils.tensorboard.SummaryWriterExtend"
] |
[((6623, 6642), 'os.walk', 'os.walk', (['args.input'], {}), '(args.input)\n', (6630, 6642), False, 'import os\n'), ((7413, 7577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""convert json dumped by c to logs which can be read by python tensorboard"""'], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(\n 'convert json dumped by c to logs which can be read by python tensorboard',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (7436, 7577), False, 'import argparse\n'), ((616, 630), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (625, 630), False, 'import json\n'), ((1952, 1966), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1961, 1966), False, 'import json\n'), ((6757, 6786), 'megengine.utils.tensorboard.SummaryWriterExtend', 'SummaryWriterExtend', (['out_path'], {}), '(out_path)\n', (6776, 6786), False, 'from megengine.utils.tensorboard import SummaryWriterExtend\n'), ((5321, 5354), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["all_chunks[j]['size']"], {}), "(all_chunks[j]['size'])\n", (5331, 5354), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((5175, 5195), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (['max_size'], {}), '(max_size)\n', (5185, 5195), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((6191, 6211), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (['max_size'], {}), '(max_size)\n', (6201, 6211), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((6896, 6919), 'os.path.join', 'os.path.join', (['path', 'key'], {}), '(path, key)\n', (6908, 6919), False, 'import os\n'), ((3178, 3209), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["jmem['peak_memory']"], {}), "(jmem['peak_memory'])\n", (3188, 3209), False, 'from megengine.utils.module_stats import sizeof_fmt\n'), ((3319, 3352), 'megengine.utils.module_stats.sizeof_fmt', 'sizeof_fmt', (["jmem['weight_memory']"], {}), "(jmem['weight_memory'])\n", (3329, 
3352), False, 'from megengine.utils.module_stats import sizeof_fmt\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.jit as jit
import megengine.optimizer as optim
import numpy as np
from official.vision.segmentation.deeplabv3plus import (
DeepLabV3Plus,
softmax_cross_entropy,
)
from official.vision.segmentation.utils import import_config_from_file
logger = mge.get_logger(__name__)
def main():
    """Parse CLI arguments and launch one training worker per requested GPU."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config", type=str, required=True, help="configuration file"
    )
    parser.add_argument(
        "-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
    )
    parser.add_argument(
        "-w", "--weight_file", type=str, default=None, help="pre-train weights file",
    )
    parser.add_argument(
        "-n", "--ngpus", type=int, default=8, help="batchsize for training"
    )
    parser.add_argument(
        "-r", "--resume", type=str, default=None, help="resume model file"
    )
    args = parser.parse_args()

    world_size = args.ngpus
    logger.info("Device Count = %d", world_size)
    if world_size <= 1:
        # Single device: run the worker in this process.
        worker(0, 1, args)
        return

    # Multi-GPU: one spawned process per rank, then wait for all of them.
    mp.set_start_method("spawn")
    procs = [
        mp.Process(target=worker, args=(rank, world_size, args))
        for rank in range(world_size)
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
def worker(rank, world_size, args):
    """Training loop for one process/GPU.

    Builds the dataloader, model and optimizer, optionally resumes from a
    checkpoint, then trains for cfg.EPOCHS epochs with a polynomial
    learning-rate decay. Only rank 0 logs progress and saves checkpoints.
    """
    cfg = import_config_from_file(args.config)

    if world_size > 1:
        # Initialize distributed training; each rank binds to its own device.
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )
        logger.info("Init process group done")

    logger.info("Prepare dataset")
    train_loader, epoch_size = build_dataloader(cfg.BATCH_SIZE, args.dataset_dir, cfg)
    # Iterations per epoch for this rank (dataset split across ranks).
    batch_iter = epoch_size // (cfg.BATCH_SIZE * world_size)

    net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)
    # Linear LR scaling with the number of devices.
    base_lr = cfg.LEARNING_RATE * world_size
    optimizer = optim.SGD(
        net.parameters(requires_grad=True),
        lr=base_lr,
        momentum=0.9,
        weight_decay=0.00004,
    )

    # Static-graph step: forward + loss + backward are traced and compiled.
    @jit.trace(symbolic=True, opt_level=2)
    def train_func(data, label, net=None, optimizer=None):
        net.train()
        pred = net(data)
        loss = softmax_cross_entropy(pred, label, ignore_index=cfg.IGNORE_INDEX)
        optimizer.backward(loss)
        return pred, loss

    begin_epoch = 0
    end_epoch = cfg.EPOCHS
    if args.resume is not None:
        # Resume training from the epoch after the checkpointed one.
        pretrained = mge.load(args.resume)
        begin_epoch = pretrained["epoch"] + 1
        net.load_state_dict(pretrained["state_dict"])
        logger.info("load success: epoch %d", begin_epoch)

    itr = begin_epoch * batch_iter
    max_itr = end_epoch * batch_iter

    # Pre-allocated input tensors; refilled in-place each iteration.
    image = mge.tensor(
        np.zeros([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.float32),
        dtype="float32",
    )
    label = mge.tensor(
        np.zeros([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]).astype(np.int32),
        dtype="int32",
    )
    # Experiment name = name of the directory containing this script.
    exp_name = os.path.abspath(os.path.dirname(__file__)).split("/")[-1]

    for epoch in range(begin_epoch, end_epoch):
        for i_batch, sample_batched in enumerate(train_loader):

            # Polynomial decay (power 0.9) of the learning rate over max_itr.
            def adjust_lr(optimizer, itr, max_itr):
                now_lr = base_lr * (1 - itr / (max_itr + 1)) ** 0.9
                for param_group in optimizer.param_groups:
                    param_group["lr"] = now_lr
                return now_lr

            now_lr = adjust_lr(optimizer, itr, max_itr)

            inputs_batched, labels_batched = sample_batched
            # Drop the channel axis of the mask and cast to int class ids.
            labels_batched = np.squeeze(labels_batched, axis=1).astype(np.int32)

            image.set_value(inputs_batched)
            label.set_value(labels_batched)

            optimizer.zero_grad()
            _, loss = train_func(image, label, net=net, optimizer=optimizer)
            optimizer.step()
            running_loss = loss.numpy()[0]

            if rank == 0:
                logger.info(
                    "%s epoch:%d/%d\tbatch:%d/%d\titr:%d\tlr:%g\tloss:%g",
                    exp_name,
                    epoch,
                    end_epoch,
                    i_batch,
                    batch_iter,
                    itr + 1,
                    now_lr,
                    running_loss,
                )
            itr += 1

        if rank == 0:
            # Checkpoint once per epoch from the primary rank only.
            save_path = os.path.join(cfg.MODEL_SAVE_DIR, "epoch%d.pkl" % (epoch))
            mge.save({"epoch": epoch, "state_dict": net.state_dict()}, save_path)
            logger.info("save epoch%d", epoch)
def build_dataloader(batch_size, dataset_dir, cfg):
    """Build the training DataLoader for the configured dataset.

    Returns:
        (dataloader, total_sample_count)

    Raises:
        ValueError: if cfg.DATASET names an unsupported dataset.
    """
    if cfg.DATASET == "VOC2012":
        train_dataset = dataset.PascalVOC(
            dataset_dir, cfg.DATA_TYPE, order=["image", "mask"]
        )
    elif cfg.DATASET == "Cityscapes":
        train_dataset = dataset.Cityscapes(
            dataset_dir, "train", mode="gtFine", order=["image", "mask"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.DATASET))

    # Augmentations applied jointly to image and mask.
    augmentations = T.Compose(
        transforms=[
            T.RandomHorizontalFlip(0.5),
            T.RandomResize(scale_range=(0.5, 2)),
            T.RandomCrop(
                output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH),
                padding_value=[0, 0, 0],
                padding_maskvalue=255,
            ),
            T.Normalize(mean=cfg.IMG_MEAN, std=cfg.IMG_STD),
            T.ToMode(),
        ],
        order=["image", "mask"],
    )

    sampler = data.RandomSampler(train_dataset, batch_size, drop_last=True)
    loader = data.DataLoader(
        train_dataset,
        sampler=sampler,
        transform=augmentations,
        num_workers=0,
    )
    return loader, len(train_dataset)
# Script entry point: only start training when executed directly.
if __name__ == "__main__":
    main()
|
[
"megengine.data.transform.RandomResize",
"megengine.jit.trace",
"megengine.data.transform.RandomHorizontalFlip",
"megengine.distributed.init_process_group",
"megengine.load",
"megengine.data.dataset.Cityscapes",
"megengine.get_logger",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode",
"megengine.data.dataset.PascalVOC",
"megengine.data.transform.RandomCrop",
"megengine.data.RandomSampler"
] |
[((872, 896), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (886, 896), True, 'import megengine as mge\n'), ((924, 949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (947, 949), False, 'import argparse\n'), ((1986, 2022), 'official.vision.segmentation.utils.import_config_from_file', 'import_config_from_file', (['args.config'], {}), '(args.config)\n', (2009, 2022), False, 'from official.vision.segmentation.utils import import_config_from_file\n'), ((2478, 2547), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES', 'pretrained': 'args.weight_file'}), '(class_num=cfg.NUM_CLASSES, pretrained=args.weight_file)\n', (2491, 2547), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus, softmax_cross_entropy\n'), ((2748, 2785), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (2757, 2785), True, 'import megengine.jit as jit\n'), ((5754, 5815), 'megengine.data.RandomSampler', 'data.RandomSampler', (['train_dataset', 'batch_size'], {'drop_last': '(True)'}), '(train_dataset, batch_size, drop_last=True)\n', (5772, 5815), True, 'import megengine.data as data\n'), ((1634, 1662), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (1653, 1662), True, 'import multiprocessing as mp\n'), ((2055, 2168), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (2078, 2168), True, 'import megengine.distributed as dist\n'), ((2905, 2970), 'official.vision.segmentation.deeplabv3plus.softmax_cross_entropy', 'softmax_cross_entropy', (['pred', 'label'], {'ignore_index': 'cfg.IGNORE_INDEX'}), '(pred, label, 
ignore_index=cfg.IGNORE_INDEX)\n', (2926, 2970), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus, softmax_cross_entropy\n'), ((3131, 3152), 'megengine.load', 'mge.load', (['args.resume'], {}), '(args.resume)\n', (3139, 3152), True, 'import megengine as mge\n'), ((5335, 5405), 'megengine.data.dataset.PascalVOC', 'dataset.PascalVOC', (['dataset_dir', 'cfg.DATA_TYPE'], {'order': "['image', 'mask']"}), "(dataset_dir, cfg.DATA_TYPE, order=['image', 'mask'])\n", (5352, 5405), True, 'import megengine.data.dataset as dataset\n'), ((1741, 1797), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (1751, 1797), True, 'import multiprocessing as mp\n'), ((5037, 5092), 'os.path.join', 'os.path.join', (['cfg.MODEL_SAVE_DIR', "('epoch%d.pkl' % epoch)"], {}), "(cfg.MODEL_SAVE_DIR, 'epoch%d.pkl' % epoch)\n", (5049, 5092), False, 'import os\n'), ((5514, 5599), 'megengine.data.dataset.Cityscapes', 'dataset.Cityscapes', (['dataset_dir', '"""train"""'], {'mode': '"""gtFine"""', 'order': "['image', 'mask']"}), "(dataset_dir, 'train', mode='gtFine', order=['image', 'mask']\n )\n", (5532, 5599), True, 'import megengine.data.dataset as dataset\n'), ((3418, 3478), 'numpy.zeros', 'np.zeros', (['[cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]'], {}), '([cfg.BATCH_SIZE, 3, cfg.IMG_HEIGHT, cfg.IMG_WIDTH])\n', (3426, 3478), True, 'import numpy as np\n'), ((3562, 3619), 'numpy.zeros', 'np.zeros', (['[cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH]'], {}), '([cfg.BATCH_SIZE, cfg.IMG_HEIGHT, cfg.IMG_WIDTH])\n', (3570, 3619), True, 'import numpy as np\n'), ((3698, 3723), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3713, 3723), False, 'import os\n'), ((4256, 4290), 'numpy.squeeze', 'np.squeeze', (['labels_batched'], {'axis': '(1)'}), '(labels_batched, axis=1)\n', (4266, 4290), True, 'import numpy as np\n'), ((5980, 6007), 
'megengine.data.transform.RandomHorizontalFlip', 'T.RandomHorizontalFlip', (['(0.5)'], {}), '(0.5)\n', (6002, 6007), True, 'import megengine.data.transform as T\n'), ((6025, 6061), 'megengine.data.transform.RandomResize', 'T.RandomResize', ([], {'scale_range': '(0.5, 2)'}), '(scale_range=(0.5, 2))\n', (6039, 6061), True, 'import megengine.data.transform as T\n'), ((6079, 6188), 'megengine.data.transform.RandomCrop', 'T.RandomCrop', ([], {'output_size': '(cfg.IMG_HEIGHT, cfg.IMG_WIDTH)', 'padding_value': '[0, 0, 0]', 'padding_maskvalue': '(255)'}), '(output_size=(cfg.IMG_HEIGHT, cfg.IMG_WIDTH), padding_value=[0,\n 0, 0], padding_maskvalue=255)\n', (6091, 6188), True, 'import megengine.data.transform as T\n'), ((6281, 6328), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': 'cfg.IMG_MEAN', 'std': 'cfg.IMG_STD'}), '(mean=cfg.IMG_MEAN, std=cfg.IMG_STD)\n', (6292, 6328), True, 'import megengine.data.transform as T\n'), ((6346, 6356), 'megengine.data.transform.ToMode', 'T.ToMode', ([], {}), '()\n', (6354, 6356), True, 'import megengine.data.transform as T\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from io import BytesIO
import numpy as np
from helpers import MLP, graph_mode
import megengine.functional as F
from megengine import load, save
from megengine.core import TensorDict, tensor
from megengine.jit import trace
from megengine.optimizer import SGD, Adam
from megengine.test import assertTensorClose
def get_input():
    """Create a random (data, label) tensor pair plus their shapes.

    Data is a float32 (2, 28) batch; labels are int32 class ids in [0, 10).
    """
    batch_size, input_dim = 2, 28
    data_shape = (batch_size, input_dim)
    label_shape = (batch_size,)
    data = tensor()
    label = tensor(dtype=np.int32)
    data.set_value(np.random.random(data_shape).astype(np.float32))
    label.set_value(np.random.randint(0, 10, label_shape))
    return data, data_shape, label, label_shape
def test_sgd_simple():
    """Plain SGD with weight decay: p_new = p * (1 - lr*wd) - lr * grad."""
    data, data_shape, label, label_shape = get_input()
    mlp = MLP()
    opt = SGD(mlp.parameters(), lr=0.01, weight_decay=0.1)
    for step in range(3):
        data.set_value(np.random.random(data_shape).astype(np.float32))
        label.set_value(np.random.randint(0, 10, label_shape))
        loss = F.square_loss(mlp(data), label.reshape(-1, 1))
        # Alternate between the two equivalent ways of clearing gradients.
        (opt if step % 2 else mlp).zero_grad()
        opt.backward(loss)
        grads = TensorDict()
        params_before = TensorDict()
        for param in mlp.parameters():
            grad = F.grad(loss, param, use_virtual_grad=False)
            assertTensorClose(grad.numpy(), param.grad.numpy())
            grads[param] = np.copy(grad.numpy())
            params_before[param] = np.copy(param.numpy())
        opt.step()
        for param in mlp.parameters():
            # lr=0.01, weight_decay=0.1 -> decay factor 1 - 0.01*0.1 = 0.999
            assertTensorClose(
                param.numpy(), params_before[param] * 0.999 - grads[param] * 0.01
            )
def test_sgd_momentum():
    """Momentum SGD: slot = 0.9 * slot - lr * grad; param += slot."""
    data, data_shape, label, label_shape = get_input()
    mlp = MLP()
    opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
    # Hand-maintained momentum buffers mirroring the optimizer's state.
    slots = TensorDict()
    for param in mlp.parameters():
        slots[param] = np.zeros(param.shape).astype(np.float32)
    for _ in range(3):
        data.set_value(np.random.random(data_shape).astype(np.float32))
        label.set_value(np.random.randint(0, 10, label_shape))
        loss = F.square_loss(mlp(data), label.reshape(-1, 1))
        opt.zero_grad()
        opt.backward(loss)
        params_before = TensorDict()
        for param in mlp.parameters():
            params_before[param] = np.copy(param.numpy())
        opt.step()
        for param in mlp.parameters():
            slot = slots[param]
            slot *= 0.9
            slot -= param.grad.numpy() * 0.01
            assertTensorClose(param.numpy(), params_before[param] + slot)
# TODO: put opt.step() inside trace
def test_sgd_momentum_static():
    """Momentum SGD where forward/backward run inside a static ``trace``.

    opt.step() still executes eagerly outside the traced function (see TODO).
    """
    _, data_shape, _, label_shape = get_input()
    mlp = MLP()
    opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
    # Traced step: forward, loss, zero_grad and backward compiled together.
    @trace
    def f(data, label):
        pred = mlp(data)
        loss = F.square_loss(pred, label.reshape(-1, 1))
        opt.zero_grad()
        opt.backward(loss)
    # Hand-maintained momentum buffers mirroring the optimizer's state.
    slots = TensorDict()
    for param in mlp.parameters():
        slots[param] = np.zeros(param.shape).astype(np.float32)
    for _ in range(3):
        f(
            np.random.random(data_shape).astype(np.float32),
            np.random.randint(0, 10, label_shape).astype(np.int32),
        )
        orig_params = TensorDict()
        grads = TensorDict()
        for param in mlp.parameters():
            orig_params[param] = np.copy(param.numpy())
            grads[param] = np.copy(param.grad.numpy())
        opt.step()
        # Expected update: slot = 0.9 * slot - lr * grad; param += slot.
        for param in mlp.parameters():
            slot = slots[param]
            orig_param = orig_params[param]
            slot *= 0.9
            slot -= param.grad.numpy() * 0.01
            assertTensorClose(param.numpy(), orig_param + slot)
def test_update_lr():
    """Mutating param_groups' lr mid-training must affect subsequent steps."""
    data, data_shape, label, label_shape = get_input()
    mlp = MLP()
    opt = SGD(mlp.parameters(), lr=0.01)
    # Warm-up step at the initial learning rate.
    loss = F.square_loss(mlp(data), label.reshape(-1, 1))
    opt.zero_grad()
    opt.backward(loss)
    opt.step()
    # Bump the learning rate from 0.01 to 0.03 in every param group.
    for group in opt.param_groups:
        group["lr"] += 0.02
    for _ in range(3):
        data.set_value(np.random.random(data_shape).astype(np.float32))
        label.set_value(np.random.randint(0, 10, label_shape))
        loss = F.square_loss(mlp(data), label.reshape(-1, 1))
        opt.zero_grad()
        opt.backward(loss)
        for param in mlp.parameters():
            grad = F.grad(loss, param, use_virtual_grad=False)
            assertTensorClose(grad.numpy(), param.grad.numpy())
        params_before = [np.copy(p.numpy()) for p in mlp.parameters()]
        opt.step()
        # Each parameter must have moved by the *new* lr times its gradient.
        for param, before in zip(mlp.parameters(), params_before):
            assertTensorClose(param.numpy(), before - param.grad.numpy() * 0.03)
def test_adam():
    """Adam optimizer matches a hand-computed reference in eager and static mode.

    Keeps shadow first/second moment buffers (m_slots, v_slots) and checks
    the bias-corrected update after each step.
    """
    data, data_shape, label, label_shape = get_input()
    mlp = MLP()
    beta0 = 0.8
    beta1 = 0.9
    eps = 1e-4
    opt = Adam(mlp.parameters(), lr=0.01, betas=(beta0, beta1), eps=eps)
    # Shadow moment estimates mirroring Adam's internal state.
    m_slots = TensorDict()
    v_slots = TensorDict()
    for param in mlp.parameters():
        m_slots[param] = np.zeros(param.shape).astype(np.float32)
        v_slots[param] = np.zeros(param.shape).astype(np.float32)
    step_size = 0
    # Closure over orig_params / step_size, which are rebound before each call.
    def check_value():
        for param in mlp.parameters():
            grad = param.grad.numpy()
            orig_param = orig_params[param]
            m = m_slots[param]
            v = v_slots[param]
            # In-place exponential moving averages of grad and grad^2.
            m *= beta0
            m += (1 - beta0) * grad
            v *= beta1
            v += (1 - beta1) * grad * grad
            # Bias-corrected Adam update.
            update = (m / (1 - beta0 ** step_size)) / (
                np.sqrt(v / (1 - beta1 ** step_size)) + eps
            )
            assertTensorClose(param.numpy(), orig_param - 0.01 * update)
    # eager
    for _ in range(3):
        data.set_value(np.random.random(data_shape).astype(np.float32))
        label.set_value(np.random.randint(0, 10, label_shape))
        pred = mlp(data)
        loss = F.square_loss(pred, label.reshape(-1, 1))
        opt.zero_grad()
        grads = opt.backward(loss)
        orig_params = TensorDict()
        for param in mlp.parameters():
            orig_params[param] = np.copy(param.numpy())
        opt.step()
        step_size += 1
        check_value()
    # static
    @trace
    def f(data, label):
        pred = mlp(data)
        loss = F.square_loss(pred, label.reshape(-1, 1))
        opt.backward(loss)
    for _ in range(3):
        opt.zero_grad()
        orig_params = TensorDict()
        for param in mlp.parameters():
            orig_params[param] = np.copy(param.numpy())
        f(
            np.random.random(data_shape).astype(np.float32),
            np.random.randint(0, 10, label_shape).astype(np.int32),
        )
        opt.step()
        step_size += 1
        check_value()
@graph_mode("eager", "static")
def test_optimizer_serialization():
    """A restored optimizer must keep its momentum buffers and hyper-params.

    Saves an SGD(lr=0.01, momentum=0.9) state after one step, loads it into
    an optimizer built with different hyper-params, and checks the next step
    follows the restored settings.
    """
    data, data_shape, label, label_shape = get_input()
    mlp = MLP()
    opt = SGD(mlp.parameters(), lr=0.01, momentum=0.9)
    # Hand-maintained momentum buffers mirroring the optimizer's state.
    slots = TensorDict()
    for param in mlp.parameters():
        slots[param] = np.zeros(param.shape).astype(np.float32)
    # One step with the original optimizer, tracking slots by hand.
    loss = F.square_loss(mlp(data), label.reshape(-1, 1))
    opt.zero_grad()
    opt.backward(loss)
    opt.step()
    for param in mlp.parameters():
        slot = slots[param]
        slot *= 0.9
        slot -= param.grad.numpy() * 0.01
    # Round-trip the optimizer state through (de)serialization.
    with BytesIO() as fout:
        save(opt.state_dict(), fout)
        fout.seek(0)
        state_dict = load(fout)
        opt1 = SGD(mlp.parameters(), lr=0.02, momentum=0.8)
        opt1.load_state_dict(state_dict)

        data.set_value(np.random.random(data_shape).astype(np.float32))
        label.set_value(np.random.randint(0, 10, label_shape))
        loss = F.square_loss(mlp(data), label.reshape(-1, 1))
        opt1.zero_grad()
        opt1.backward(loss)
        params_before = TensorDict()
        for param in mlp.parameters():
            params_before[param] = np.copy(param.numpy())
        opt1.step()
        # The restored hyper-parameters (lr=0.01, momentum=0.9) must win
        # over the ones opt1 was constructed with.
        for param in mlp.parameters():
            slot = slots[param]
            slot *= 0.9
            slot -= param.grad.numpy() * 0.01
            assertTensorClose(param.numpy(), params_before[param] + slot)
|
[
"megengine.load",
"megengine.functional.grad",
"megengine.core.TensorDict",
"megengine.core.tensor"
] |
[((7391, 7420), 'helpers.graph_mode', 'graph_mode', (['"""eager"""', '"""static"""'], {}), "('eager', 'static')\n", (7401, 7420), False, 'from helpers import MLP, graph_mode\n'), ((829, 837), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (835, 837), False, 'from megengine.core import TensorDict, tensor\n'), ((850, 872), 'megengine.core.tensor', 'tensor', ([], {'dtype': 'np.int32'}), '(dtype=np.int32)\n', (856, 872), False, 'from megengine.core import TensorDict, tensor\n'), ((1138, 1143), 'helpers.MLP', 'MLP', ([], {}), '()\n', (1141, 1143), False, 'from helpers import MLP, graph_mode\n'), ((2172, 2177), 'helpers.MLP', 'MLP', ([], {}), '()\n', (2175, 2177), False, 'from helpers import MLP, graph_mode\n'), ((2245, 2257), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2255, 2257), False, 'from megengine.core import TensorDict, tensor\n'), ((3258, 3263), 'helpers.MLP', 'MLP', ([], {}), '()\n', (3261, 3263), False, 'from helpers import MLP, graph_mode\n'), ((3501, 3513), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3511, 3513), False, 'from megengine.core import TensorDict, tensor\n'), ((4357, 4362), 'helpers.MLP', 'MLP', ([], {}), '()\n', (4360, 4362), False, 'from helpers import MLP, graph_mode\n'), ((5432, 5437), 'helpers.MLP', 'MLP', ([], {}), '()\n', (5435, 5437), False, 'from helpers import MLP, graph_mode\n'), ((5572, 5584), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5582, 5584), False, 'from megengine.core import TensorDict, tensor\n'), ((5599, 5611), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (5609, 5611), False, 'from megengine.core import TensorDict, tensor\n'), ((7522, 7527), 'helpers.MLP', 'MLP', ([], {}), '()\n', (7525, 7527), False, 'from helpers import MLP, graph_mode\n'), ((7595, 7607), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (7605, 7607), False, 'from megengine.core import TensorDict, tensor\n'), ((961, 998), 'numpy.random.randint', 'np.random.randint', 
(['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (978, 998), True, 'import numpy as np\n'), ((1578, 1590), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (1588, 1590), False, 'from megengine.core import TensorDict, tensor\n'), ((1613, 1625), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (1623, 1625), False, 'from megengine.core import TensorDict, tensor\n'), ((2670, 2682), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2680, 2682), False, 'from megengine.core import TensorDict, tensor\n'), ((2699, 2711), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (2709, 2711), False, 'from megengine.core import TensorDict, tensor\n'), ((3808, 3820), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3818, 3820), False, 'from megengine.core import TensorDict, tensor\n'), ((3837, 3849), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (3847, 3849), False, 'from megengine.core import TensorDict, tensor\n'), ((6666, 6678), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (6676, 6678), False, 'from megengine.core import TensorDict, tensor\n'), ((7066, 7078), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (7076, 7078), False, 'from megengine.core import TensorDict, tensor\n'), ((7975, 7984), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7982, 7984), False, 'from io import BytesIO\n'), ((8073, 8083), 'megengine.load', 'load', (['fout'], {}), '(fout)\n', (8077, 8083), False, 'from megengine import load, save\n'), ((8478, 8490), 'megengine.core.TensorDict', 'TensorDict', ([], {}), '()\n', (8488, 8490), False, 'from megengine.core import TensorDict, tensor\n'), ((1324, 1361), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (1341, 1361), True, 'import numpy as np\n'), ((1684, 1727), 'megengine.functional.grad', 'F.grad', (['loss', 'param'], {'use_virtual_grad': '(False)'}), '(loss, param, 
use_virtual_grad=False)\n', (1690, 1727), True, 'import megengine.functional as F\n'), ((2476, 2513), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (2493, 2513), True, 'import numpy as np\n'), ((4718, 4755), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (4735, 4755), True, 'import numpy as np\n'), ((4948, 4991), 'megengine.functional.grad', 'F.grad', (['loss', 'param'], {'use_virtual_grad': '(False)'}), '(loss, param, use_virtual_grad=False)\n', (4954, 4991), True, 'import megengine.functional as F\n'), ((6464, 6501), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (6481, 6501), True, 'import numpy as np\n'), ((8282, 8319), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (8299, 8319), True, 'import numpy as np\n'), ((892, 920), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (908, 920), True, 'import numpy as np\n'), ((2316, 2337), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (2324, 2337), True, 'import numpy as np\n'), ((3572, 3593), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (3580, 3593), True, 'import numpy as np\n'), ((5672, 5693), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5680, 5693), True, 'import numpy as np\n'), ((5738, 5759), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (5746, 5759), True, 'import numpy as np\n'), ((7666, 7687), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (7674, 7687), True, 'import numpy as np\n'), ((1251, 1279), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (1267, 1279), True, 'import numpy as np\n'), ((2403, 2431), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', 
(2419, 2431), True, 'import numpy as np\n'), ((3659, 3687), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (3675, 3687), True, 'import numpy as np\n'), ((3720, 3757), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (3737, 3757), True, 'import numpy as np\n'), ((4645, 4673), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (4661, 4673), True, 'import numpy as np\n'), ((6201, 6238), 'numpy.sqrt', 'np.sqrt', (['(v / (1 - beta1 ** step_size))'], {}), '(v / (1 - beta1 ** step_size))\n', (6208, 6238), True, 'import numpy as np\n'), ((6391, 6419), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (6407, 6419), True, 'import numpy as np\n'), ((7197, 7225), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (7213, 7225), True, 'import numpy as np\n'), ((7258, 7295), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', 'label_shape'], {}), '(0, 10, label_shape)\n', (7275, 7295), True, 'import numpy as np\n'), ((8209, 8237), 'numpy.random.random', 'np.random.random', (['data_shape'], {}), '(data_shape)\n', (8225, 8237), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine.functional as F
import megengine.module as M
from .darknet import Darknet
from .network_blocks import BaseConv, UpSample
class YOLOFPN(M.Module):
    """YOLOFPN module.

    Darknet-53 is the default backbone of this model.

    Args:
        depth: depth of the Darknet backbone. Default: 53
        in_features: names of the backbone feature maps fed into the FPN,
            ordered shallow to deep. Default: ``["dark3", "dark4", "dark5"]``
    """
    def __init__(
        self, depth=53, in_features=None,
    ):
        super().__init__()
        # Use a None sentinel instead of a mutable default list argument,
        # which would be shared across every instantiation.
        if in_features is None:
            in_features = ["dark3", "dark4", "dark5"]
        self.backbone = Darknet(depth)
        self.in_features = in_features
        # out 1
        self.out1_cbl = self._make_cbl(512, 256, 1)
        self.out1 = self._make_embedding([256, 512], 512 + 256)
        # out 2
        self.out2_cbl = self._make_cbl(256, 128, 1)
        self.out2 = self._make_embedding([128, 256], 256 + 128)
        # upsample
        self.upsample = UpSample(scale_factor=2, mode="bilinear")
    def _make_cbl(self, _in, _out, ks):
        """Single conv block (conv + norm + leaky-relu) with kernel size ``ks``."""
        return BaseConv(_in, _out, ks, stride=1, act="lrelu")
    def _make_embedding(self, filters_list, in_filters):
        """Five alternating 1x1/3x3 conv blocks applied after each concat."""
        m = M.Sequential(
            *[
                self._make_cbl(in_filters, filters_list[0], 1),
                self._make_cbl(filters_list[0], filters_list[1], 3),
                self._make_cbl(filters_list[1], filters_list[0], 1),
                self._make_cbl(filters_list[0], filters_list[1], 3),
                self._make_cbl(filters_list[1], filters_list[0], 1),
            ]
        )
        return m
    def forward(self, inputs):
        """
        Args:
            inputs (Tensor): input image.
        Returns:
            Tuple[Tensor]: FPN output features.
        """
        # backbone
        out_features = self.backbone(inputs)
        x2, x1, x0 = [out_features[f] for f in self.in_features]
        # yolo branch 1: upsample the deepest map and fuse with the middle one
        x1_in = self.out1_cbl(x0)
        x1_in = self.upsample(x1_in)
        x1_in = F.concat([x1_in, x1], 1)
        out_dark4 = self.out1(x1_in)
        # yolo branch 2: upsample again and fuse with the shallowest map
        x2_in = self.out2_cbl(out_dark4)
        x2_in = self.upsample(x2_in)
        x2_in = F.concat([x2_in, x2], 1)
        out_dark3 = self.out2(x2_in)
        outputs = (out_dark3, out_dark4, x0)
        return outputs
|
[
"megengine.functional.concat"
] |
[((1916, 1940), 'megengine.functional.concat', 'F.concat', (['[x1_in, x1]', '(1)'], {}), '([x1_in, x1], 1)\n', (1924, 1940), True, 'import megengine.functional as F\n'), ((2098, 2122), 'megengine.functional.concat', 'F.concat', (['[x2_in, x2]', '(1)'], {}), '([x2_in, x2], 1)\n', (2106, 2122), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
# pylint: disable=import-error
import model as snet_model
import quantizable_model as quantizable_snet_model
import megengine
import megengine.device as device
import megengine.autodiff as autodiff
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.optimizer as optim
import megengine.jit as jit
import megengine.amp as amp
import megengine.quantization as Q
# NOTE(review): this module-level name shadows the stdlib ``logging`` module
# within this file; any later ``logging.info`` call hits this logger instead.
logging = megengine.logger.get_logger()
from dataset import get_dataloader
# Mapping from the CLI ``--qat-config`` names to megengine qconfig presets.
DEFAULT_QAT_CONFIG = {
    "ema": Q.ema_fakequant_qconfig,
    # "ema_lowbit" is the name advertised by the --qat-config choices; the
    # historical misspelling "ema_lowbi" is kept as a backward-compat alias.
    "ema_lowbit": Q.ema_lowbit_fakequant_qconfig,
    "ema_lowbi": Q.ema_lowbit_fakequant_qconfig,
    "sync_ema": Q.sync_ema_fakequant_qconfig,
    "min_max": Q.min_max_fakequant_qconfig,
    "tqt": Q.tqt_qconfig,
}
def get_qconifg(config_name: str):
    """Look up the quantization-aware-training qconfig registered under ``config_name``."""
    # NOTE: the function name keeps its historical "qconifg" spelling for callers.
    qconfig = DEFAULT_QAT_CONFIG[config_name]
    return qconfig
def main():
    """Parse command-line arguments and launch single- or multi-GPU training."""
    parser = argparse.ArgumentParser(description="shufflenet benchmark")
    parser.add_argument(
        "-a",
        "--arch",
        default="shufflenet_v2_x2_0",
        help="model architecture (default: shufflenet_v2_x2_0)",
    )
    parser.add_argument(
        "-n",
        "--ngpus",
        default=1,
        type=int,
        help="number of GPUs per node (default: 1)",
    )
    parser.add_argument(
        "-s",
        "--steps",
        default=200,
        type=int,
        help="number of train steps (default: 200)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        metavar="SIZE",
        default=64,
        type=int,
        help="batch size for single GPU (default: 64)",
    )
    parser.add_argument(
        "--trace",
        action="store_true",
        default=False,
        help="whether use trace or not (default: False)",
    )
    parser.add_argument(
        "--symbolic",
        action="store_true",
        default=False,
        help="whether use symbolic trace or not (default: False)",
    )
    # type=float so a value supplied on the command line is parsed as a
    # number instead of staying a string (argparse does not infer the type
    # from the default).
    parser.add_argument(
        "--lr",
        metavar="LR",
        default=0.001,
        type=float,
        help="learning rate for single GPU (default: 0.001)",
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, help="momentum (default: 0.9)"
    )
    parser.add_argument(
        "--weight-decay", default=4e-5, type=float, help="weight decay (default: 4e-5)"
    )
    parser.add_argument(
        "-p",
        "--print-freq",
        default=1,
        type=int,
        metavar="N",
        help="print frequency (default: 1)",
    )
    parser.add_argument(
        "-m",
        "--mode",
        default="normal",
        type=str,
        choices=["normal", "mp", "qat"],
        help="Quantization Mode\n"
        "normal: no quantization, using float32\n"
        "mp: input type is fp16\n"
        "qat: quantization aware training"
    )
    parser.add_argument(
        "--qat-config",
        default="min_max",
        type=str,
        choices=["min_max", "ema", "ema_lowbit", "sync_ema", "tqt"],
        help="quantization aware training config\n"
        "min_max: min_max_fakequant_qconfig\n"
        "ema: ema_fakequant_qconfig\n"
        "ema_lowbit: ema_lowbit_fakequant_qconfig\n"
        "sync_ema: sync_ema_fakequant_qconfig\n"
        "tqt: tqt_qconfig"
    )
    parser.add_argument("--dist-addr", default="localhost")
    parser.add_argument("--dist-port", type=int, default=0)
    parser.add_argument("--world-size", type=int, default=None)
    # type=int so that ``args.rank * args.ngpus`` below is integer arithmetic
    # rather than string repetition when --rank is given on the command line.
    parser.add_argument("--rank", type=int, default=0)
    parser.add_argument("--loader", default=False, action="store_true", help="whether use loader")
    parser.add_argument("--preload", default=False, action="store_true", help="whether use preload")
    args = parser.parse_args()
    if args.world_size is None:
        args.world_size = args.ngpus
    if args.world_size > 1:
        # launch one worker process per GPU
        train_func = dist.launcher(worker, master_ip=args.dist_addr, port=args.dist_port,
                                   world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank * args.ngpus)
        train_func(args)
    else:
        worker(args)
def worker(args):
    """Run the training benchmark in the current (possibly spawned) process.

    Args:
        args: parsed command-line namespace produced by ``main``.
    """
    steps = args.steps
    # build model
    shufflenet = quantizable_snet_model if args.mode == "qat" else snet_model
    model = shufflenet.__dict__[args.arch]()
    if args.mode == "qat":
        if args.qat_config == "sync_ema":
            assert args.ngpus > 1, "sync_ema does not support ngpus={}".format(args.ngpus)
        qconfig = get_qconifg(args.qat_config)
        model = Q.quantize_qat(module=model, qconfig= qconfig)
        model.train()
        Q.enable_observer(model)
        Q.enable_fake_quant(model)
    # Sync parameters
    if args.world_size > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)
    # Autodiff gradient manager
    gm = autodiff.GradManager().attach(
        model.parameters(),
        callbacks=dist.make_allreduce_cb("SUM") if args.world_size > 1 else None,
    )
    # Optimizer
    params_wd = []
    params_nwd = []
    # NOTE(review): params_scale is filled below but never passed to the
    # optimizer -- confirm whether scale parameters are meant to be trained.
    params_scale = []
    for n, p in model.named_parameters():
        if n.find("weight") >= 0 and len(p.shape) > 1:
            params_wd.append(p)
        elif n.find("scale") >= 0:
            params_scale.append(p)
        else:
            params_nwd.append(p)
    opt = optim.SGD(
        [{"params": params_wd},
         {"params": params_nwd, "weight_decay": 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay * args.world_size, # scale weight decay in "SUM" mode
    )
    # train and valid func
    @amp.autocast(enabled=args.mode == "mp")
    def train_step(image, label):
        # one forward/backward/update iteration; returns the loss tensor
        with gm:
            logits = model(image)
            loss = F.nn.cross_entropy(logits, label, label_smooth=0.1)
            gm.backward(loss)
            opt.step().clear_grad()
        return loss
    if args.trace:
        if args.symbolic:
            train_step = jit.trace(train_step, symbolic=True, sublinear_memory_config=jit.SublinearMemoryConfig(genetic_nr_iter=50), symbolic_shape=False)
        else:
            train_step = jit.trace(train_step, symbolic=False, symbolic_shape=False)
    else:
        assert args.symbolic==False, "invalid arguments: trace=Trace, symbolic=True"
    # start training
    objs = AverageMeter("Loss")
    clck = AverageMeter("Time")
    if args.loader:
        dataloader = iter(get_dataloader(args))
        image,label = next(dataloader)
    else:
        # synthetic random batch when no data loader is requested
        image = np.random.randn(args.batch_size, 3, 224, 224).astype("float32")
        label = np.random.randint(0, 1000, size=(args.batch_size,)).astype("int32")
    # warm up
    for step in range(10):
        if args.loader:
            image,label = next(dataloader)
            if not args.preload:
                image = megengine.tensor(image, dtype="float32")
                label = megengine.tensor(label, dtype="int32")
        else:
            image = megengine.tensor(image, dtype="float32")
            label = megengine.tensor(label, dtype="int32")
        loss = train_step(image, label)
        loss.item()
    for step in range(0, steps):
        t = time.time()
        if args.loader:
            image,label = next(dataloader)
            if not args.preload:
                image = megengine.tensor(image, dtype="float32")
                label = megengine.tensor(label, dtype="int32")
        else:
            image = megengine.tensor(image, dtype="float32")
            label = megengine.tensor(label, dtype="int32")
        loss = train_step(image, label)
        objs.update(loss.item())
        clck.update(time.time() - t)
        if step % args.print_freq == 0 and dist.get_rank() == 0:
            print(
                "Step {}, {}, {}".format(
                    step,
                    objs,
                    clck,
                ))
            objs.reset()
    if dist.get_rank() == 0:
        # NOTE(review): "shufflent" below looks like a typo for "shufflenet",
        # but it is a runtime string, so it is left untouched here.
        print("="*20, "summary", "="*20)
        print(" benchmark: shufflent")
        if args.trace:
            print("      mode: trace(symbolic={})".format("True, sublinear=True" if args.symbolic else "False"))
        else:
            print("      mode: imperative")
        print("    loader: {}".format("" if not args.loader else "--loader"))
        if args.loader:
            print("   preload: {}".format("" if not args.preload else "--preload"))
        print("      arch: {}".format(args.arch))
        print("train_mode: {}".format(args.mode))
        print(" batchsize: {}".format(args.batch_size))
        print("      #GPU: {}".format(args.ngpus))
        print("  avg time: {:.3f} seconds".format(clck.avg))
class AverageMeter:
    """Tracks the most recent value and a running average of a series."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Drop all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # Build e.g. "{name} {val:.3f} ({avg:.3f})" and fill it from the
        # instance attributes.
        template = "".join(["{name} {val", self.fmt, "} ({avg", self.fmt, "})"])
        return template.format(**self.__dict__)
# Script entry point: parse CLI arguments and run the benchmark.
if __name__ == "__main__":
    main()
|
[
"megengine.jit.trace",
"megengine.distributed.get_rank",
"megengine.quantization.enable_fake_quant",
"megengine.tensor",
"megengine.functional.nn.cross_entropy",
"megengine.quantization.enable_observer",
"megengine.quantization.quantize_qat",
"megengine.jit.SublinearMemoryConfig",
"megengine.optimizer.SGD",
"megengine.logger.get_logger",
"megengine.distributed.make_allreduce_cb",
"megengine.distributed.launcher",
"megengine.amp.autocast",
"megengine.autodiff.GradManager"
] |
[((908, 937), 'megengine.logger.get_logger', 'megengine.logger.get_logger', ([], {}), '()\n', (935, 937), False, 'import megengine\n'), ((1301, 1360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""shufflenet benchmark"""'}), "(description='shufflenet benchmark')\n", (1324, 1360), False, 'import argparse\n'), ((5671, 5843), 'megengine.optimizer.SGD', 'optim.SGD', (["[{'params': params_wd}, {'params': params_nwd, 'weight_decay': 0}]"], {'lr': 'args.lr', 'momentum': 'args.momentum', 'weight_decay': '(args.weight_decay * args.world_size)'}), "([{'params': params_wd}, {'params': params_nwd, 'weight_decay': 0}\n ], lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay *\n args.world_size)\n", (5680, 5843), True, 'import megengine.optimizer as optim\n'), ((5962, 6001), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': "(args.mode == 'mp')"}), "(enabled=args.mode == 'mp')\n", (5974, 6001), True, 'import megengine.amp as amp\n'), ((4256, 4414), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {'master_ip': 'args.dist_addr', 'port': 'args.dist_port', 'world_size': 'args.world_size', 'n_gpus': 'args.ngpus', 'rank_start': '(args.rank * args.ngpus)'}), '(worker, master_ip=args.dist_addr, port=args.dist_port,\n world_size=args.world_size, n_gpus=args.ngpus, rank_start=args.rank *\n args.ngpus)\n', (4269, 4414), True, 'import megengine.distributed as dist\n'), ((4903, 4948), 'megengine.quantization.quantize_qat', 'Q.quantize_qat', ([], {'module': 'model', 'qconfig': 'qconfig'}), '(module=model, qconfig=qconfig)\n', (4917, 4948), True, 'import megengine.quantization as Q\n'), ((4980, 5004), 'megengine.quantization.enable_observer', 'Q.enable_observer', (['model'], {}), '(model)\n', (4997, 5004), True, 'import megengine.quantization as Q\n'), ((5013, 5039), 'megengine.quantization.enable_fake_quant', 'Q.enable_fake_quant', (['model'], {}), '(model)\n', (5032, 5039), True, 'import megengine.quantization as Q\n'), 
((7515, 7526), 'time.time', 'time.time', ([], {}), '()\n', (7524, 7526), False, 'import time\n'), ((8241, 8256), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8254, 8256), True, 'import megengine.distributed as dist\n'), ((5190, 5212), 'megengine.autodiff.GradManager', 'autodiff.GradManager', ([], {}), '()\n', (5210, 5212), True, 'import megengine.autodiff as autodiff\n'), ((6106, 6157), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6124, 6157), True, 'import megengine.functional as F\n'), ((6480, 6539), 'megengine.jit.trace', 'jit.trace', (['train_step'], {'symbolic': '(False)', 'symbolic_shape': '(False)'}), '(train_step, symbolic=False, symbolic_shape=False)\n', (6489, 6539), True, 'import megengine.jit as jit\n'), ((6768, 6788), 'dataset.get_dataloader', 'get_dataloader', (['args'], {}), '(args)\n', (6782, 6788), False, 'from dataset import get_dataloader\n'), ((7308, 7348), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7324, 7348), False, 'import megengine\n'), ((7369, 7407), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7385, 7407), False, 'import megengine\n'), ((7789, 7829), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7805, 7829), False, 'import megengine\n'), ((7850, 7888), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7866, 7888), False, 'import megengine\n'), ((5267, 5296), 'megengine.distributed.make_allreduce_cb', 'dist.make_allreduce_cb', (['"""SUM"""'], {}), "('SUM')\n", (5289, 5296), True, 'import megengine.distributed as dist\n'), ((6855, 6900), 'numpy.random.randn', 'np.random.randn', (['args.batch_size', '(3)', '(224)', '(224)'], {}), '(args.batch_size, 3, 224, 
224)\n', (6870, 6900), True, 'import numpy as np\n'), ((6935, 6986), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(args.batch_size,)'}), '(0, 1000, size=(args.batch_size,))\n', (6952, 6986), True, 'import numpy as np\n'), ((7170, 7210), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7186, 7210), False, 'import megengine\n'), ((7235, 7273), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7251, 7273), False, 'import megengine\n'), ((7651, 7691), 'megengine.tensor', 'megengine.tensor', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (7667, 7691), False, 'import megengine\n'), ((7716, 7754), 'megengine.tensor', 'megengine.tensor', (['label'], {'dtype': '"""int32"""'}), "(label, dtype='int32')\n", (7732, 7754), False, 'import megengine\n'), ((7984, 7995), 'time.time', 'time.time', ([], {}), '()\n', (7993, 7995), False, 'import time\n'), ((8044, 8059), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8057, 8059), True, 'import megengine.distributed as dist\n'), ((6372, 6417), 'megengine.jit.SublinearMemoryConfig', 'jit.SublinearMemoryConfig', ([], {'genetic_nr_iter': '(50)'}), '(genetic_nr_iter=50)\n', (6397, 6417), True, 'import megengine.jit as jit\n')]
|
import os.path as osp
from abc import ABCMeta, abstractmethod
import megengine as mge
import megengine.distributed as dist
from megengine.optimizer.optimizer import Optimizer
from megengine.module import Module
from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger
from ..hook import Hook, HOOKS, get_priority
# Filename suffixes used by ``BaseRunner.load_checkpoint`` to locate the
# per-submodule weight and optimizer state files inside a checkpoint dir.
module_ckpt_suffix = "_module.mge"
optim_ckpt_suffix = "_optim.mge"
class BaseRunner(metaclass=ABCMeta):
    """The base class of Runner, a training helper for Mge.
    All subclasses should implement the following APIs:
    - ``run()``
    - ``train()``
    - ``test()``
    - ``save_checkpoint()``
    - ``resume()``
    Args:
        model (:obj:`megengine.module.Module`): The model to be run.
        optimizers_cfg (dict): optimizer configs
        work_dir (str, optional): The working directory to save checkpoints and logs. Defaults to None.
    """
    def __init__(self, model, optimizers_cfg=None, work_dir=None):
        # The model must expose the entry points the runner delegates to;
        # fail fast if any of them is missing.
        assert hasattr(model, 'train_step')
        assert hasattr(model, 'test_step')
        assert hasattr(model, 'create_gradmanager_and_optimizers')
        assert hasattr(model, 'cal_for_eval')
        self.model = model
        self.optimizers_cfg = optimizers_cfg
        self.logger = get_root_logger()
        self.work_dir = work_dir
        assert self.work_dir is not None
        # get model name from the model class
        self._model_name = self.model.__class__.__name__
        self.mode = None
        self._hooks = []
        # training progress counters, advanced by concrete subclasses
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0
        self._max_epochs = 0
        self._max_iters = 0
    @property
    def model_name(self):
        """str: Name of the model, usually the module class name."""
        return self._model_name
    @property
    def hooks(self):
        """list[:obj:`Hook`]: A list of registered hooks."""
        return self._hooks
    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch
    @property
    def iter(self):
        """int: Current iteration."""
        return self._iter
    @property
    def inner_iter(self):
        """int: Iteration in an epoch."""
        return self._inner_iter
    @property
    def max_epochs(self):
        """int: Maximum training epochs."""
        return self._max_epochs
    @property
    def max_iters(self):
        """int: Maximum training iterations."""
        return self._max_iters
    @abstractmethod
    def train(self, data_loader):
        """Training loop body; must be implemented by subclasses."""
        pass
    @abstractmethod
    def test(self, data_loader):
        """Evaluation loop body; must be implemented by subclasses."""
        pass
    @abstractmethod
    def run(self, data_loaders, workflow, max_iters):
        """Top-level entry combining the workflow phases; implemented by subclasses."""
        pass
    @abstractmethod
    def save_checkpoint(self, out_dir, create_symlink=True):
        """Persist the current state to ``out_dir``; implemented by subclasses."""
        pass
    @abstractmethod
    def resume(self, path2checkpoint):
        """Restore a previously saved state; implemented by subclasses."""
        pass
    @abstractmethod
    def register_training_hooks(self, lr_config, checkpoint_config, log_config):
        """Register default hooks for training.
        Default hooks include:
        - LrUpdaterHook
        - CheckpointSaverHook
        - log_config
        """
        pass
    def create_gradmanager_and_optimizers(self):
        """Delegate gradient-manager and optimizer construction to the model."""
        self.model.create_gradmanager_and_optimizers(self.optimizers_cfg)
    def sync_model_params(self):
        """Broadcast the model's parameters across ranks when distributed."""
        if dist.is_distributed():
            self.logger.info("syncing the model's parameters...")
            dist.bcast_list_(self.model.parameters(), dist.WORLD)
        else:
            pass # do nothing
    def current_lr(self):
        """Get current learning rates.
        Returns:
            list[float] | dict[str, list[float]]: Current learning rates of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.
        """
        raise NotImplementedError("")
        # if isinstance(self.optimizer, Optimizer):
        #     lr = [group['lr'] for group in self.optimizer.param_groups]
        # elif isinstance(self.optimizer, dict):
        #     lr = dict()
        #     for name, optim in self.optimizer.items():
        #         lr[name] = [group['lr'] for group in optim.param_groups]
        # else:
        #     raise RuntimeError('lr is not applicable because optimizer does not exist.')
        # return lr
    def current_momentum(self):
        """Get current momentums.
        Returns:
            list[float] | dict[str, list[float]]: Current momentums of all
                param groups. If the runner has a dict of optimizers, this
                method will return a dict.
        """
        raise NotImplementedError("")
        # def _get_momentum(optimizer):
        #     momentums = []
        #     for group in optimizer.param_groups:
        #         if 'momentum' in group.keys():
        #             momentums.append(group['momentum'])
        #         elif 'betas' in group.keys():
        #             momentums.append(group['betas'][0])
        #         else:
        #             momentums.append(0)
        #     return momentums
        #
        # if self.optimizer is None:
        #     raise RuntimeError('momentum is not applicable because optimizer does not exist.')
        # elif isinstance(self.optimizer, Optimizer):
        #     momentums = _get_momentum(self.optimizer)
        # elif isinstance(self.optimizer, dict):
        #     momentums = dict()
        #     for name, optim in self.optimizer.items():
        #         momentums[name] = _get_momentum(optim)
        # return momentums
    def register_hook(self, hook, priority='NORMAL'):
        """Register a hook into the hook list.
        The hook will be inserted into a priority queue, with the specified
        priority (See :class:`Priority` for details of priorities).
        For hooks with the same priority, they will be triggered in the same
        order as they are registered.
        Args:
            hook (:obj:`Hook`): The hook to be registered.
            priority (int or str or :obj:`Priority`): Hook priority.
                Lower value means higher priority.
        """
        assert isinstance(hook, Hook)
        if hasattr(hook, 'priority'):
            raise ValueError('"priority" is a reserved attribute for hook')
        priority = get_priority(priority)
        hook.priority = priority
        # insert the hook to a sorted list
        inserted = False
        for i in range(len(self._hooks) - 1, -1, -1):
            if priority >= self._hooks[i].priority:
                self._hooks.insert(i + 1, hook)
                inserted = True
                break
        if not inserted:
            self._hooks.insert(0, hook)
    def call_hook(self, fn_name):
        """Call all hooks.
        Args:
            fn_name (str): The function name in each hook to be called, such as
                "before_train_epoch".
        """
        for hook in self._hooks:
            getattr(hook, fn_name)(self)
    def load_checkpoint(self, path2checkpoint, load_optim=True):
        """
        :param path2checkpoint: e.g. workdirs/xxxxx/checkpoint/epoch_10
        :param load_optim: whether to also collect the optimizers' state dicts.
        :return: dict
        """
        assert osp.exists(path2checkpoint), "{} do not exist".format(path2checkpoint)
        dirname = osp.split(path2checkpoint)[-1]
        epoch, nums = dirname.split("_")
        assert epoch in ("epoch", )
        self.logger.info('load checkpoint from {}'.format(path2checkpoint))
        # Iterate over every submodule of the model that has an optimizer
        # configured, and load its saved state.
        res = dict()
        res['nums'] = int(nums)
        for submodule_name in self.optimizers_cfg.keys():
            submodule = getattr(self.model, submodule_name, None)
            assert submodule is not None, "model should have submodule {}".format(submodule_name)
            assert isinstance(submodule, Module), "submodule should be instance of mge.module.Module"
            # only rank 0 reads the module weights; presumably they are later
            # broadcast via ``sync_model_params`` -- TODO confirm with callers
            if dist.get_rank() == 0:
                module_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + module_ckpt_suffix))
                submodule.load_state_dict(module_state_dict, strict = False)
            if load_optim:
                optim_state_dict = mge.load(osp.join(path2checkpoint, submodule_name + optim_ckpt_suffix))
                res[submodule_name] = optim_state_dict
        return res
    def register_momentum_hook(self, momentum_config):
        """Register a momentum updater hook built from ``momentum_config``."""
        if momentum_config is None:
            return
        if isinstance(momentum_config, dict):
            assert 'policy' in momentum_config
            policy_type = momentum_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of momentum updater.
            # Since this is not applicable for `CosineAnealingMomentumUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'MomentumUpdaterHook'
            momentum_config['type'] = hook_type
            hook = build_from_cfg(momentum_config, HOOKS)
        else:
            hook = momentum_config
        self.register_hook(hook)
    def register_optimizer_hook(self, optimizer_config):
        """Register an optimizer hook; dict configs default to ``OptimizerHook``."""
        if optimizer_config is None:
            return
        if isinstance(optimizer_config, dict):
            optimizer_config.setdefault('type', 'OptimizerHook')
            hook = build_from_cfg(optimizer_config, HOOKS)
        else:
            hook = optimizer_config
        self.register_hook(hook)
    def register_lr_hook(self, lr_config):
        """Register a learning-rate updater hook built from ``lr_config``."""
        if isinstance(lr_config, dict):
            assert 'policy' in lr_config
            policy_type = lr_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of Lr updater.
            # Since this is not applicable for `CosineAnealingLrUpdater`,
            # the string will not be changed if it contains capital letters.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'LrUpdaterHook'
            lr_config['type'] = hook_type
            hook = build_from_cfg(lr_config, HOOKS)
        else:
            hook = lr_config
        self.register_hook(hook)
    def register_checkpoint_hook(self, checkpoint_config):
        """Register a checkpoint hook; dict configs default to ``CheckpointHook``."""
        if isinstance(checkpoint_config, dict):
            checkpoint_config.setdefault('type', 'CheckpointHook')
            hook = build_from_cfg(checkpoint_config, HOOKS)
        else:
            hook = checkpoint_config
        self.register_hook(hook)
    def register_logger_hooks(self, log_config):
        """Register every logger hook in ``log_config['hooks']`` at HIGH priority."""
        log_interval = log_config['interval']
        for info in log_config['hooks']:
            logger_hook = build_from_cfg(info, HOOKS, default_args=dict(interval=log_interval))
            self.register_hook(logger_hook, priority='HIGH')
|
[
"megengine.distributed.is_distributed",
"megengine.distributed.get_rank"
] |
[((1254, 1271), 'edit.utils.get_root_logger', 'get_root_logger', ([], {}), '()\n', (1269, 1271), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((3305, 3326), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (3324, 3326), True, 'import megengine.distributed as dist\n'), ((7156, 7183), 'os.path.exists', 'osp.exists', (['path2checkpoint'], {}), '(path2checkpoint)\n', (7166, 7183), True, 'import os.path as osp\n'), ((7245, 7271), 'os.path.split', 'osp.split', (['path2checkpoint'], {}), '(path2checkpoint)\n', (7254, 7271), True, 'import os.path as osp\n'), ((9150, 9188), 'edit.utils.build_from_cfg', 'build_from_cfg', (['momentum_config', 'HOOKS'], {}), '(momentum_config, HOOKS)\n', (9164, 9188), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((9516, 9555), 'edit.utils.build_from_cfg', 'build_from_cfg', (['optimizer_config', 'HOOKS'], {}), '(optimizer_config, HOOKS)\n', (9530, 9555), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((10396, 10428), 'edit.utils.build_from_cfg', 'build_from_cfg', (['lr_config', 'HOOKS'], {}), '(lr_config, HOOKS)\n', (10410, 10428), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((10699, 10739), 'edit.utils.build_from_cfg', 'build_from_cfg', (['checkpoint_config', 'HOOKS'], {}), '(checkpoint_config, HOOKS)\n', (10713, 10739), False, 'from edit.utils import mkdir_or_exist, build_from_cfg, get_root_logger\n'), ((7868, 7883), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7881, 7883), True, 'import megengine.distributed as dist\n'), ((7935, 7997), 'os.path.join', 'osp.join', (['path2checkpoint', '(submodule_name + module_ckpt_suffix)'], {}), '(path2checkpoint, submodule_name + module_ckpt_suffix)\n', (7943, 7997), True, 'import os.path as osp\n'), ((8147, 8208), 'os.path.join', 'osp.join', (['path2checkpoint', '(submodule_name + 
optim_ckpt_suffix)'], {}), '(path2checkpoint, submodule_name + optim_ckpt_suffix)\n', (8155, 8208), True, 'import os.path as osp\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""VGG Series
VGG: `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_
"""
from typing import Any, Mapping, Sequence
import megengine as mge
import megengine.hub as hub
import megengine.module as M
from basecls.layers import activation, build_head, conv2d, init_weights, norm2d
from basecls.utils import recursive_update, registers
__all__ = ["VGGStage", "VGG"]
class VGGStage(M.Module):
    """VGG stage (sequence of blocks w/ the same output shape)."""

    def __init__(self, w_in: int, w_out: int, depth: int, norm_name: str, act_name: str):
        super().__init__()
        self.depth = depth
        # Register the blocks as attributes ``b1`` .. ``b{depth}``; only the
        # first block changes the channel count.
        cur_in = w_in
        for idx in range(1, depth + 1):
            layers = [conv2d(cur_in, w_out, 3), norm2d(norm_name, w_out), activation(act_name)]
            setattr(self, f"b{idx}", M.Sequential(*layers))
            cur_in = w_out
        self.max_pool = M.MaxPool2d(kernel_size=2, stride=2)

    def __len__(self):
        return self.depth

    def forward(self, x: mge.Tensor) -> mge.Tensor:
        """Apply every block in order, then downsample with max-pooling."""
        for idx in range(1, self.depth + 1):
            x = getattr(self, f"b{idx}")(x)
        return self.max_pool(x)
@registers.models.register()
class VGG(M.Module):
    """VGG model.

    Args:
        depths: depth for each stage (number of blocks in the stage).
        widths: width for each stage (width of each block in the stage).
        norm_name: normalization function. Default: ``None``
        act_name: activation function. Default: ``"relu"``
        head: head args. Default: ``None``
    """

    def __init__(
        self,
        depths: Sequence[int],
        widths: Sequence[int],
        norm_name: str = None,
        act_name: str = "relu",
        head: Mapping[str, Any] = None,
    ):
        super().__init__()
        self.depths = depths
        # Stages are registered as ``s1`` .. ``sN``; each consumes the
        # previous stage's output width, starting from the 3-channel input.
        in_w = 3
        for idx, (d, w) in enumerate(zip(depths, widths), start=1):
            setattr(self, f"s{idx}", VGGStage(in_w, w, d, norm_name, act_name))
            in_w = w
        self.head = build_head(in_w, head, None, act_name)
        self.apply(init_weights)

    def forward(self, x: mge.Tensor) -> mge.Tensor:
        """Run all stages in order, then the classification head if present."""
        for idx in range(1, len(self.depths) + 1):
            x = getattr(self, f"s{idx}")(x)
        if getattr(self, "head", None) is not None:
            x = self.head(x)
        return x
def _build_vgg(**kwargs):
    """Construct a :class:`VGG`, filling in the default dropout head."""
    model_args = {"head": {"name": "VGGHead", "dropout_prob": 0.5}}
    recursive_update(model_args, kwargs)
    return VGG(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl")
def vgg11(**kwargs):
    """Build a VGG-11 model; ``kwargs`` override the stock configuration."""
    model_args = {"depths": [1, 1, 2, 2, 2], "widths": [64, 128, 256, 512, 512]}
    recursive_update(model_args, kwargs)
    return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl")
def vgg11_bn(**kwargs):
    """Build a VGG-11 model with ``BN`` normalization layers."""
    model_args = {"norm_name": "BN"}
    recursive_update(model_args, kwargs)
    return vgg11(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl")
def vgg13(**kwargs):
    """Build a VGG-13 model; ``kwargs`` override the stock configuration."""
    model_args = {"depths": [2, 2, 2, 2, 2], "widths": [64, 128, 256, 512, 512]}
    recursive_update(model_args, kwargs)
    return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl")
def vgg13_bn(**kwargs):
    """Build a VGG-13 model with ``BN`` normalization layers."""
    model_args = {"norm_name": "BN"}
    recursive_update(model_args, kwargs)
    return vgg13(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl")
def vgg16(**kwargs):
    """Build a VGG-16 model; ``kwargs`` override the stock configuration."""
    model_args = {"depths": [2, 2, 3, 3, 3], "widths": [64, 128, 256, 512, 512]}
    recursive_update(model_args, kwargs)
    return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl")
def vgg16_bn(**kwargs):
    """Build a VGG-16 model with ``BN`` normalization layers."""
    model_args = {"norm_name": "BN"}
    recursive_update(model_args, kwargs)
    return vgg16(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl")
def vgg19(**kwargs):
    """Build a VGG-19 model; ``kwargs`` override the stock configuration."""
    model_args = {"depths": [2, 2, 4, 4, 4], "widths": [64, 128, 256, 512, 512]}
    recursive_update(model_args, kwargs)
    return _build_vgg(**model_args)
@registers.models.register()
@hub.pretrained("https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl")
def vgg19_bn(**kwargs):
    """Build a VGG-19 model with ``BN`` normalization layers."""
    model_args = {"norm_name": "BN"}
    recursive_update(model_args, kwargs)
    return vgg19(**model_args)
|
[
"megengine.module.MaxPool2d",
"megengine.hub.pretrained"
] |
[((1300, 1327), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (1325, 1327), False, 'from basecls.utils import recursive_update, registers\n'), ((2727, 2754), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (2752, 2754), False, 'from basecls.utils import recursive_update, registers\n'), ((2756, 2857), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg11/vgg11.pkl'\n )\n", (2770, 2857), True, 'import megengine.hub as hub\n'), ((3028, 3055), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3053, 3055), False, 'from basecls.utils import recursive_update, registers\n'), ((3057, 3164), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg11_bn/vgg11_bn.pkl'\n )\n", (3071, 3164), True, 'import megengine.hub as hub\n'), ((3292, 3319), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3317, 3319), False, 'from basecls.utils import recursive_update, registers\n'), ((3321, 3422), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg13/vgg13.pkl'\n )\n", (3335, 3422), True, 'import megengine.hub as hub\n'), ((3593, 3620), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3618, 3620), False, 'from basecls.utils import recursive_update, registers\n'), ((3622, 3729), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl"""'], {}), "(\n 
'https://data.megengine.org.cn/research/basecls/models/vgg/vgg13_bn/vgg13_bn.pkl'\n )\n", (3636, 3729), True, 'import megengine.hub as hub\n'), ((3857, 3884), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (3882, 3884), False, 'from basecls.utils import recursive_update, registers\n'), ((3886, 3987), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg16/vgg16.pkl'\n )\n", (3900, 3987), True, 'import megengine.hub as hub\n'), ((4158, 4185), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4183, 4185), False, 'from basecls.utils import recursive_update, registers\n'), ((4187, 4294), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg16_bn/vgg16_bn.pkl'\n )\n", (4201, 4294), True, 'import megengine.hub as hub\n'), ((4422, 4449), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4447, 4449), False, 'from basecls.utils import recursive_update, registers\n'), ((4451, 4552), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/research/basecls/models/vgg/vgg19/vgg19.pkl'\n )\n", (4465, 4552), True, 'import megengine.hub as hub\n'), ((4723, 4750), 'basecls.utils.registers.models.register', 'registers.models.register', ([], {}), '()\n', (4748, 4750), False, 'from basecls.utils import recursive_update, registers\n'), ((4752, 4859), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl"""'], {}), "(\n 
'https://data.megengine.org.cn/research/basecls/models/vgg/vgg19_bn/vgg19_bn.pkl'\n )\n", (4766, 4859), True, 'import megengine.hub as hub\n'), ((2658, 2694), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (2674, 2694), False, 'from basecls.utils import recursive_update, registers\n'), ((2952, 2988), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (2968, 2988), False, 'from basecls.utils import recursive_update, registers\n'), ((3221, 3257), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3237, 3257), False, 'from basecls.utils import recursive_update, registers\n'), ((3517, 3553), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3533, 3553), False, 'from basecls.utils import recursive_update, registers\n'), ((3786, 3822), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (3802, 3822), False, 'from basecls.utils import recursive_update, registers\n'), ((4082, 4118), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4098, 4118), False, 'from basecls.utils import recursive_update, registers\n'), ((4351, 4387), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4367, 4387), False, 'from basecls.utils import recursive_update, registers\n'), ((4647, 4683), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4663, 4683), False, 'from basecls.utils import recursive_update, registers\n'), ((4916, 4952), 'basecls.utils.recursive_update', 'recursive_update', (['model_args', 'kwargs'], {}), '(model_args, kwargs)\n', (4932, 4952), False, 'from basecls.utils import 
recursive_update, registers\n'), ((1003, 1039), 'megengine.module.MaxPool2d', 'M.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1014, 1039), True, 'import megengine.module as M\n'), ((2219, 2259), 'basecls.layers.build_head', 'build_head', (['prev_w', 'head', 'None', 'act_name'], {}), '(prev_w, head, None, act_name)\n', (2229, 2259), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((823, 845), 'basecls.layers.conv2d', 'conv2d', (['w_in', 'w_out', '(3)'], {}), '(w_in, w_out, 3)\n', (829, 845), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((847, 871), 'basecls.layers.norm2d', 'norm2d', (['norm_name', 'w_out'], {}), '(norm_name, w_out)\n', (853, 871), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n'), ((873, 893), 'basecls.layers.activation', 'activation', (['act_name'], {}), '(act_name)\n', (883, 893), False, 'from basecls.layers import activation, build_head, conv2d, init_weights, norm2d\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import numpy as np
from megengine import get_logger as mge_get_logger
mge_version = mge.__version__
if mge_version <= "0.6.0":
# pylint: disable=import-error, no-name-in-module
import megengine._internal as mgb
from megengine._internal import cgtools
else:
import megengine.core.tensor.megbrain_graph as G
import megengine.core._imperative_rt as rt
import megengine.utils.comp_graph_tools as cgtools
if mge_version <= "1.1.0":
from megengine.core.tensor.raw_tensor import ( # pylint: disable=no-name-in-module,import-error
as_raw_tensor as Tensor,
)
else:
from megengine.tensor import Tensor
def get_logger(*args):
    """Forward to :func:`megengine.get_logger` (aliased at import time)."""
    return mge_get_logger(*args)
def get_mge_version():
    """Return the MegEngine version string cached at module import."""
    return mge_version
def get_symvar_value(sym_var):
    """Evaluate a symbolic variable to a concrete value.

    Uses the pre-computed/inferred value when available; otherwise compiles
    a minimal function over the owning graph and executes it.
    """
    if mge_version <= "0.6.0":
        # Legacy API: `inferred_value` is populated when the value is static.
        if sym_var.inferred_value is not None:
            return sym_var.inferred_value
        graph = sym_var.owner_graph
        return graph.compile_outonly(sym_var)()
    # New API: `value` is populated when the value is static.
    if sym_var.value is not None:
        return sym_var.value
    out_node = G.ValueOutputNode(sym_var)
    graph = out_node.outputs[0].graph
    graph.compile(out_node.outputs).execute()
    return out_node.get_value()
def isnum(x):
    """Return True if *x* is a plain Python int or float."""
    return isinstance(x, int) or isinstance(x, float)
def isconst(x):
    """Return True when the variable carries baked-in numpy data (a constant)."""
    return x.np_data is not None
def isvar(x):
    """Return True when *x* is a graph variable for the running MegEngine version."""
    # Only the branch for the active version is evaluated, so the other
    # module (mgb / rt) need not be importable.
    var_cls = mgb.SymbolVar if mge_version <= "0.6.0" else rt.VarNode  # pylint: disable=c-extension-no-member
    return isinstance(x, var_cls)
def get_shape(x):
    """Shape of a variable, papering over the 0.6.0 API difference."""
    if mge_version <= "0.6.0":
        return x._get_imm_shape()
    return x.shape
def get_dep_vars(x, type=None):
    """Thin wrapper over ``cgtools.get_dep_vars``.

    NOTE: the parameter name ``type`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    return cgtools.get_dep_vars(x, type)
def get_dtype_name(x):
    """Return the mgb dtype name for numpy dtypes, else None."""
    if isinstance(x.dtype, np.dtype):
        return x.dtype.metadata["mgb_dtype"]["name"]
    return None
def get_opr_type(x):
    """Type name of operator *x* (delegates to cgtools)."""
    return cgtools.get_opr_type(x)
def get_owner_opr_type(x):
    """Type name of the operator that produced variable *x*."""
    lookup = cgtools.get_type if mge_version <= "0.6.0" else cgtools.get_owner_opr_type
    return lookup(x._var)
def load_comp_graph_from_file(path):
    """Load a serialized computing graph; return ``(graph, output_vars)``."""
    if mge_version <= "0.6.0":
        cg, _, outputs = mgb.load_comp_graph_from_file(path)
        return cg, outputs
    ret = G.load_graph(path)
    return ret.graph, ret.output_vars_list
def graph_traversal(outputs):
    """Delegate to ``cgtools.graph_traversal``.

    Returns its 6-tuple: (map_oprs, map_vars, var2oprs, opr2receivers,
    indegree2opr, opr2indegree).
    """
    return cgtools.graph_traversal(outputs)
def get_oprs_seq(outputs, prune_reshape=True):
    """Topologically ordered operator sequence, optionally pruning reshapes."""
    return cgtools.get_oprs_seq(outputs, prune_reshape=prune_reshape)
def eval_partial(inp, oup):
    """Compile and run the sub-graph producing ``oup``, feeding ``inp``.

    ``oup`` may be a single var or a sequence; the Host2DeviceCopy
    dependencies of ``oup`` become the function inputs, bound positionally
    to ``inp``.  Returns the evaluated value(s) of ``oup``.
    """
    if not isinstance(oup, (list, tuple)):
        oup = (oup,)
    inputs = cgtools.get_dep_vars(oup, "Host2DeviceCopy")
    if mge_version <= "0.6.0":
        cg = oup[0].owner_graph
        outputs = list(map(mgb.copy_output, oup))
        f = cg.compile(inputs, outputs)
        result = f(inp)
    else:
        if not isinstance(inp, (list, tuple)):
            inp = (inp,)
        replace_dict = {}
        inp_node_list = []
        for i in inputs:
            # BUGFIX: use each input var's own dtype; the original passed
            # inputs[0].dtype for every node, which is wrong when the
            # dependencies have mixed dtypes.
            inp_node = G.InputNode(
                device="xpux", dtype=i.dtype, graph=inputs[0].graph
            )
            replace_dict[i] = inp_node.outputs[0]
            inp_node_list.append(inp_node)
        new_out = cgtools.replace_vars(oup, replace_dict)
        out_node_list = [G.OutputNode(i) for i in new_out]
        new_out_list = [i.outputs[0] for i in out_node_list]
        cg = new_out_list[0].graph
        func = cg.compile(new_out_list)
        for node, value in zip(inp_node_list, inp):
            node.set_value(Tensor(value)._dev_tensor())
        func.execute()
        result = [o.get_value().numpy() for o in out_node_list]
    return result
|
[
"megengine.core.tensor.megbrain_graph.InputNode",
"megengine.utils.comp_graph_tools.get_owner_opr_type",
"megengine.utils.comp_graph_tools.get_dep_vars",
"megengine.utils.comp_graph_tools.get_opr_type",
"megengine.core.tensor.megbrain_graph.load_graph",
"megengine.utils.comp_graph_tools.replace_vars",
"megengine.core.tensor.megbrain_graph.ValueOutputNode",
"megengine._internal.load_comp_graph_from_file",
"megengine.utils.comp_graph_tools.get_oprs_seq",
"megengine.core.tensor.megbrain_graph.OutputNode",
"megengine.utils.comp_graph_tools.graph_traversal",
"megengine.get_logger",
"megengine.tensor.Tensor",
"megengine.utils.comp_graph_tools.get_type"
] |
[((1102, 1123), 'megengine.get_logger', 'mge_get_logger', (['*args'], {}), '(*args)\n', (1116, 1123), True, 'from megengine import get_logger as mge_get_logger\n'), ((2222, 2251), 'megengine.utils.comp_graph_tools.get_dep_vars', 'cgtools.get_dep_vars', (['x', 'type'], {}), '(x, type)\n', (2242, 2251), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2419, 2442), 'megengine.utils.comp_graph_tools.get_opr_type', 'cgtools.get_opr_type', (['x'], {}), '(x)\n', (2439, 2442), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3029, 3061), 'megengine.utils.comp_graph_tools.graph_traversal', 'cgtools.graph_traversal', (['outputs'], {}), '(outputs)\n', (3052, 3061), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3209, 3267), 'megengine.utils.comp_graph_tools.get_oprs_seq', 'cgtools.get_oprs_seq', (['outputs'], {'prune_reshape': 'prune_reshape'}), '(outputs, prune_reshape=prune_reshape)\n', (3229, 3267), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((3395, 3439), 'megengine.utils.comp_graph_tools.get_dep_vars', 'cgtools.get_dep_vars', (['oup', '"""Host2DeviceCopy"""'], {}), "(oup, 'Host2DeviceCopy')\n", (3415, 3439), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2518, 2542), 'megengine.utils.comp_graph_tools.get_type', 'cgtools.get_type', (['x._var'], {}), '(x._var)\n', (2534, 2542), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2568, 2602), 'megengine.utils.comp_graph_tools.get_owner_opr_type', 'cgtools.get_owner_opr_type', (['x._var'], {}), '(x._var)\n', (2594, 2602), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((2698, 2733), 'megengine._internal.load_comp_graph_from_file', 'mgb.load_comp_graph_from_file', (['path'], {}), '(path)\n', (2727, 2733), True, 'import megengine._internal as mgb\n'), ((2758, 2776), 'megengine.core.tensor.megbrain_graph.load_graph', 'G.load_graph', (['path'], {}), '(path)\n', (2770, 2776), True, 'import 
megengine.core.tensor.megbrain_graph as G\n'), ((4014, 4053), 'megengine.utils.comp_graph_tools.replace_vars', 'cgtools.replace_vars', (['oup', 'replace_dict'], {}), '(oup, replace_dict)\n', (4034, 4053), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((1611, 1637), 'megengine.core.tensor.megbrain_graph.ValueOutputNode', 'G.ValueOutputNode', (['sym_var'], {}), '(sym_var)\n', (1628, 1637), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((3800, 3872), 'megengine.core.tensor.megbrain_graph.InputNode', 'G.InputNode', ([], {'device': '"""xpux"""', 'dtype': 'inputs[0].dtype', 'graph': 'inputs[0].graph'}), "(device='xpux', dtype=inputs[0].dtype, graph=inputs[0].graph)\n", (3811, 3872), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((4079, 4094), 'megengine.core.tensor.megbrain_graph.OutputNode', 'G.OutputNode', (['i'], {}), '(i)\n', (4091, 4094), True, 'import megengine.core.tensor.megbrain_graph as G\n'), ((4328, 4341), 'megengine.tensor.Tensor', 'Tensor', (['value'], {}), '(value)\n', (4334, 4341), False, 'from megengine.tensor import Tensor\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
    """BatchNorm1d vs. a manual numpy reference.

    Training mode: output, running_mean and running_var must track the
    reference (biased variance normalizes the batch; the unbiased variance
    feeds the running estimate).  Eval mode: outputs are deterministic and
    running statistics stay frozen.
    """
    nr_chan = 8
    data_shape = (3, nr_chan, 4)
    momentum = 0.9
    bn = BatchNorm1d(nr_chan, momentum=momentum)
    running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
    running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
    data = tensor()
    for i in range(3):
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        # per-channel mean over batch (axis 0) and length (axis 2) dims
        mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
        # flatten to (N * L, C) so variance is computed per channel
        xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
            (data_shape[0] * data_shape[2], nr_chan)
        )
        # biased variance (ddof=0) normalizes the current batch ...
        var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
        sd = np.sqrt(var_biased + bn.eps)
        # ... while the running estimate accumulates the unbiased one (ddof=1)
        var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
        running_mean = running_mean * momentum + mean * (1 - momentum)
        running_var = running_var * momentum + var_unbiased * (1 - momentum)
        data.set_value(xv)
        yv = bn(data)
        yv_expect = (xv - mean) / sd
        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
        assertTensorClose(
            running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
        )
        assertTensorClose(
            running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
        )
    # test set 'training' flag to False
    mean_backup = bn.running_mean.numpy()
    var_backup = bn.running_var.numpy()
    bn.training = False
    xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
    data.set_value(xv)
    yv1 = bn(data)
    yv2 = bn(data)
    # eval mode: deterministic output, running stats untouched
    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
    # eval-mode output is normalized with the accumulated running stats
    yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
    """BatchNorm2d vs. a manual numpy reference (NCHW input).

    Same contract as the 1d test: biased variance normalizes the batch,
    unbiased variance feeds the running estimate, and eval mode uses the
    frozen running statistics.
    """
    nr_chan = 8
    data_shape = (3, nr_chan, 16, 16)
    momentum = 0.9
    bn = BatchNorm2d(nr_chan, momentum=momentum)
    running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
    running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
    data = tensor()
    for i in range(3):
        xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        # flatten to (N * H * W, C) so statistics are per channel
        xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
            (data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
        )
        mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
        # biased variance (ddof=0) normalizes the current batch ...
        var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
        sd = np.sqrt(var_biased + bn.eps)
        # ... while the running estimate accumulates the unbiased one (ddof=1)
        var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
        running_mean = running_mean * momentum + mean * (1 - momentum)
        running_var = running_var * momentum + var_unbiased * (1 - momentum)
        data.set_value(xv)
        yv = bn(data)
        yv_expect = (xv - mean) / sd
        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
        assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
        assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
    # test set 'training' flag to False
    mean_backup = bn.running_mean.numpy()
    var_backup = bn.running_var.numpy()
    bn.training = False
    xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
    data.set_value(xv)
    yv1 = bn(data)
    yv2 = bn(data)
    # eval mode: deterministic output, running stats untouched
    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
    # eval-mode output is normalized with the accumulated running stats
    yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
    """Without running statistics, BatchNorm1d always normalizes with the
    current batch's statistics — even after switching to eval mode."""
    nr_chan = 8
    data_shape = (3, nr_chan, 4)
    bn = BatchNorm1d(8, track_running_stats=False)
    data = tensor()
    for step in range(4):
        # Flipping to eval mode halfway must not change the behavior
        # when no running statistics are tracked.
        if step == 2:
            bn.training = False
        batch = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        expect_mean = np.mean(
            np.mean(batch, axis=0, keepdims=True), axis=2, keepdims=True
        )
        # flatten to (N * L, C) for per-channel variance
        flattened = np.transpose(batch, [0, 2, 1]).reshape(
            (data_shape[0] * data_shape[2], nr_chan)
        )
        expect_var = np.var(flattened, axis=0).reshape((1, nr_chan, 1))
        expect_std = np.sqrt(expect_var + bn.eps)
        data.set_value(batch)
        out = bn(data)
        assertTensorClose(
            (batch - expect_mean) / expect_std, out.numpy(), max_err=5e-6
        )
def test_batchnorm2d_no_stats():
    """Without running statistics, BatchNorm2d always normalizes with the
    current batch's statistics — even after switching to eval mode."""
    nr_chan = 8
    data_shape = (3, nr_chan, 16, 16)
    bn = BatchNorm2d(8, track_running_stats=False)
    data = tensor()
    for step in range(4):
        # Flipping to eval mode halfway must not change the behavior
        # when no running statistics are tracked.
        if step == 2:
            bn.training = False
        batch = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
        # flatten to (N * H * W, C) for per-channel statistics
        flattened = np.transpose(batch, [0, 2, 3, 1]).reshape(
            (data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
        )
        expect_mean = np.mean(flattened, axis=0).reshape(1, nr_chan, 1, 1)
        expect_var = np.var(flattened, axis=0).reshape((1, nr_chan, 1, 1))
        expect_std = np.sqrt(expect_var + bn.eps)
        data.set_value(batch)
        out = bn(data)
        assertTensorClose(
            (batch - expect_mean) / expect_std, out.numpy(), max_err=5e-6
        )
|
[
"megengine.module.BatchNorm2d",
"megengine.module.BatchNorm1d",
"megengine.core.tensor"
] |
[((669, 708), 'megengine.module.BatchNorm1d', 'BatchNorm1d', (['nr_chan'], {'momentum': 'momentum'}), '(nr_chan, momentum=momentum)\n', (680, 708), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((728, 771), 'numpy.zeros', 'np.zeros', (['(1, nr_chan, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1), dtype=np.float32)\n', (736, 771), True, 'import numpy as np\n'), ((790, 832), 'numpy.ones', 'np.ones', (['(1, nr_chan, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1), dtype=np.float32)\n', (797, 832), True, 'import numpy as np\n'), ((844, 852), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (850, 852), False, 'from megengine.core import tensor\n'), ((2624, 2663), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['nr_chan'], {'momentum': 'momentum'}), '(nr_chan, momentum=momentum)\n', (2635, 2663), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((2683, 2729), 'numpy.zeros', 'np.zeros', (['(1, nr_chan, 1, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1, 1), dtype=np.float32)\n', (2691, 2729), True, 'import numpy as np\n'), ((2748, 2793), 'numpy.ones', 'np.ones', (['(1, nr_chan, 1, 1)'], {'dtype': 'np.float32'}), '((1, nr_chan, 1, 1), dtype=np.float32)\n', (2755, 2793), True, 'import numpy as np\n'), ((2805, 2813), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (2811, 2813), False, 'from megengine.core import tensor\n'), ((4492, 4533), 'megengine.module.BatchNorm1d', 'BatchNorm1d', (['(8)'], {'track_running_stats': '(False)'}), '(8, track_running_stats=False)\n', (4503, 4533), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((4545, 4553), 'megengine.core.tensor', 'tensor', ([], {}), '()\n', (4551, 4553), False, 'from megengine.core import tensor\n'), ((5267, 5308), 'megengine.module.BatchNorm2d', 'BatchNorm2d', (['(8)'], {'track_running_stats': '(False)'}), '(8, track_running_stats=False)\n', (5278, 5308), False, 'from megengine.module import BatchNorm1d, BatchNorm2d\n'), ((5320, 5328), 
'megengine.core.tensor', 'tensor', ([], {}), '()\n', (5326, 5328), False, 'from megengine.core import tensor\n'), ((1247, 1275), 'numpy.sqrt', 'np.sqrt', (['(var_biased + bn.eps)'], {}), '(var_biased + bn.eps)\n', (1254, 1275), True, 'import numpy as np\n'), ((2426, 2455), 'numpy.sqrt', 'np.sqrt', (['(running_var + bn.eps)'], {}), '(running_var + bn.eps)\n', (2433, 2455), True, 'import numpy as np\n'), ((3221, 3249), 'numpy.sqrt', 'np.sqrt', (['(var_biased + bn.eps)'], {}), '(var_biased + bn.eps)\n', (3228, 3249), True, 'import numpy as np\n'), ((4311, 4340), 'numpy.sqrt', 'np.sqrt', (['(running_var + bn.eps)'], {}), '(running_var + bn.eps)\n', (4318, 4340), True, 'import numpy as np\n'), ((4996, 5017), 'numpy.sqrt', 'np.sqrt', (['(var + bn.eps)'], {}), '(var + bn.eps)\n', (5003, 5017), True, 'import numpy as np\n'), ((5779, 5800), 'numpy.sqrt', 'np.sqrt', (['(var + bn.eps)'], {}), '(var + bn.eps)\n', (5786, 5800), True, 'import numpy as np\n'), ((974, 1008), 'numpy.mean', 'np.mean', (['xv'], {'axis': '(0)', 'keepdims': '(True)'}), '(xv, axis=0, keepdims=True)\n', (981, 1008), True, 'import numpy as np\n'), ((2066, 2108), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (2082, 2108), True, 'import numpy as np\n'), ((3951, 3993), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (3967, 3993), True, 'import numpy as np\n'), ((4726, 4760), 'numpy.mean', 'np.mean', (['xv'], {'axis': '(0)', 'keepdims': '(True)'}), '(xv, axis=0, keepdims=True)\n', (4733, 4760), True, 'import numpy as np\n'), ((889, 931), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (905, 931), True, 'import numpy as np\n'), ((1057, 1084), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 1]'], {}), '(xv, [0, 2, 1])\n', (1069, 1084), True, 'import numpy as np\n'), ((1179, 1208), 
'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (1185, 1208), True, 'import numpy as np\n'), ((1300, 1337), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)', 'ddof': '(1)'}), '(xv_transposed, axis=0, ddof=1)\n', (1306, 1337), True, 'import numpy as np\n'), ((2850, 2892), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (2866, 2892), True, 'import numpy as np\n'), ((2936, 2966), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 3, 1]'], {}), '(xv, [0, 2, 3, 1])\n', (2948, 2966), True, 'import numpy as np\n'), ((3071, 3101), 'numpy.mean', 'np.mean', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (3078, 3101), True, 'import numpy as np\n'), ((3150, 3179), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (3156, 3179), True, 'import numpy as np\n'), ((3274, 3311), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)', 'ddof': '(1)'}), '(xv_transposed, axis=0, ddof=1)\n', (3280, 3311), True, 'import numpy as np\n'), ((4641, 4683), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (4657, 4683), True, 'import numpy as np\n'), ((5416, 5458), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2.3)', 'size': 'data_shape'}), '(loc=2.3, size=data_shape)\n', (5432, 5458), True, 'import numpy as np\n'), ((5502, 5532), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 3, 1]'], {}), '(xv, [0, 2, 3, 1])\n', (5514, 5532), True, 'import numpy as np\n'), ((5637, 5667), 'numpy.mean', 'np.mean', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (5644, 5667), True, 'import numpy as np\n'), ((5708, 5737), 'numpy.var', 'np.var', (['xv_transposed'], {'axis': '(0)'}), '(xv_transposed, axis=0)\n', (5714, 5737), True, 'import numpy as np\n'), ((4819, 4846), 'numpy.transpose', 'np.transpose', (['xv', '[0, 2, 1]'], 
{}), '(xv, [0, 2, 1])\n', (4831, 4846), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import megengine as mge
import megengine.module as M
from models.yolo_fpn import YOLOFPN
from models.yolo_head import YOLOXHead
from models.yolo_pafpn import YOLOPAFPN
from models.yolox import YOLOX
def build_yolox(name="yolox-s"):
    """Construct a YOLOX detector for the given configuration name.

    Args:
        name: one of "yolov3", "yolox-nano", "yolox-tiny", "yolox-s",
            "yolox-m", "yolox-l", "yolox-x".

    Returns:
        the assembled YOLOX model (80 classes, BatchNorm eps forced to 1e-3).

    Raises:
        ValueError: if ``name`` is not a known configuration.
    """
    num_classes = 80
    # value meaning: depth, width
    param_dict = {
        "yolox-nano": (0.33, 0.25),
        "yolox-tiny": (0.33, 0.375),
        "yolox-s": (0.33, 0.50),
        "yolox-m": (0.67, 0.75),
        "yolox-l": (1.0, 1.0),
        "yolox-x": (1.33, 1.25),
    }
    if name == "yolov3":
        # YOLOv3 uses its own FPN backbone; only width reaches the head.
        # (removed an unused `depth = 1.0` local from the original)
        width = 1.0
        backbone = YOLOFPN()
        head = YOLOXHead(num_classes, width, in_channels=[128, 256, 512], act="lrelu")
        model = YOLOX(backbone, head)
    else:
        # BUGFIX: `assert` is stripped under `python -O`; validate explicitly.
        if name not in param_dict:
            raise ValueError("unknown model name: {!r}".format(name))
        kwargs = {}
        depth, width = param_dict[name]
        if name == "yolox-nano":
            # nano variant uses depthwise-separable convolutions
            kwargs["depthwise"] = True
        in_channels = [256, 512, 1024]
        backbone = YOLOPAFPN(depth, width, in_channels=in_channels, **kwargs)
        head = YOLOXHead(num_classes, width, in_channels=in_channels, **kwargs)
        model = YOLOX(backbone, head)
    for m in model.modules():
        if isinstance(m, M.BatchNorm2d):
            m.eps = 1e-3
    return model
def build_and_load(weight_file, name="yolox-s"):
    """Build the named model and load its weights from file (non-strict)."""
    model = build_yolox(name)
    state = mge.load(weight_file)
    model.load_state_dict(state, strict=False)
    return model
|
[
"megengine.load"
] |
[((1412, 1433), 'megengine.load', 'mge.load', (['weight_file'], {}), '(weight_file)\n', (1420, 1433), True, 'import megengine as mge\n'), ((650, 659), 'models.yolo_fpn.YOLOFPN', 'YOLOFPN', ([], {}), '()\n', (657, 659), False, 'from models.yolo_fpn import YOLOFPN\n'), ((675, 746), 'models.yolo_head.YOLOXHead', 'YOLOXHead', (['num_classes', 'width'], {'in_channels': '[128, 256, 512]', 'act': '"""lrelu"""'}), "(num_classes, width, in_channels=[128, 256, 512], act='lrelu')\n", (684, 746), False, 'from models.yolo_head import YOLOXHead\n'), ((763, 784), 'models.yolox.YOLOX', 'YOLOX', (['backbone', 'head'], {}), '(backbone, head)\n', (768, 784), False, 'from models.yolox import YOLOX\n'), ((1019, 1077), 'models.yolo_pafpn.YOLOPAFPN', 'YOLOPAFPN', (['depth', 'width'], {'in_channels': 'in_channels'}), '(depth, width, in_channels=in_channels, **kwargs)\n', (1028, 1077), False, 'from models.yolo_pafpn import YOLOPAFPN\n'), ((1093, 1157), 'models.yolo_head.YOLOXHead', 'YOLOXHead', (['num_classes', 'width'], {'in_channels': 'in_channels'}), '(num_classes, width, in_channels=in_channels, **kwargs)\n', (1102, 1157), False, 'from models.yolo_head import YOLOXHead\n'), ((1174, 1195), 'models.yolox.YOLOX', 'YOLOX', (['backbone', 'head'], {}), '(backbone, head)\n', (1179, 1195), False, 'from models.yolox import YOLOX\n')]
|
import numpy as np
import argparse
from datetime import datetime
import time
import model as resnet_model
import megengine as mge
import megengine.autodiff as ad
import megengine.functional as F
import megengine.optimizer as optim
# Command-line interface for the synthetic-data ResNet training benchmark.
parser = argparse.ArgumentParser(description="MegEngine ResNet Training")
parser.add_argument(
    "-a",
    "--arch",
    default="resnet50",
    help="model architecture (default: resnet50)",
)
parser.add_argument(
    "--steps",
    default=10,
    type=int,
    help="number of total steps to run (default: 10)",
)
parser.add_argument(
    "-b",
    "--batch-size",
    metavar="SIZE",
    default=64,
    type=int,
    help="batch size for single GPU (default: 64)",
)
parser.add_argument(
    "--enable-dtr",
    dest="enable_dtr",
    action="store_true",
    help="Enable DTR")
parser.add_argument(
    "--memory-budget",
    dest="mem_budget",
    default=5,
    type=int,
    help="memory budget for DTR, measured in GB (default: 5)",
)
args = parser.parse_args()
if args.enable_dtr:
    # Dynamic Tensor Rematerialization: trades recompute time for memory.
    from megengine.utils.dtr import DTR
    ds = DTR(memory_budget=args.mem_budget*1024**3)
batch_size = args.batch_size
# Synthetic inputs: random images and random class labels (100 classes),
# so the benchmark needs no dataset on disk.
image = mge.tensor(np.random.random((batch_size, 3, 224, 224)))
label = mge.tensor(np.random.randint(100, size=(batch_size,)))
# Look up the model constructor by architecture name (e.g. "resnet50").
model = resnet_model.__dict__[args.arch]()
gm=ad.GradManager().attach(model.parameters())
opt=optim.SGD(model.parameters(), lr=0.0125, momentum=0.9, weight_decay=1e-4)
# Per-iteration wall-clock timing, in milliseconds.
print(datetime.now().timetz())
time_list = []
cur_time = int(round(time.time()*1000))
for i in range(args.steps):
    with gm:
        logits=model(image)
        loss=F.nn.cross_entropy(logits, label)
        gm.backward(loss)
        # Report device memory in use right after backward (the peak region).
        total, free = mge.get_mem_status_bytes()
        print('iter = {}, used bytes(/MB) = {}'.format(i+1, float(total - free)/1024.0/1024.0))
    opt.step().clear_grad()
    next_time = int(round(time.time()*1000))
    time_list.append(next_time - cur_time)
    cur_time = next_time
    print("iter = {}, loss = {}".format(i+1, loss.numpy()))
# NOTE(review): this prints the average per-iteration latency in ms, not a
# throughput rate — confirm the label is intentional.
print('throughput: {} ms!!!'.format(np.average(np.array(time_list))))
|
[
"megengine.functional.nn.cross_entropy",
"megengine.get_mem_status_bytes",
"megengine.utils.dtr.DTR",
"megengine.autodiff.GradManager"
] |
[((243, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MegEngine ResNet Training"""'}), "(description='MegEngine ResNet Training')\n", (266, 307), False, 'import argparse\n'), ((1079, 1125), 'megengine.utils.dtr.DTR', 'DTR', ([], {'memory_budget': '(args.mem_budget * 1024 ** 3)'}), '(memory_budget=args.mem_budget * 1024 ** 3)\n', (1082, 1125), False, 'from megengine.utils.dtr import DTR\n'), ((1171, 1214), 'numpy.random.random', 'np.random.random', (['(batch_size, 3, 224, 224)'], {}), '((batch_size, 3, 224, 224))\n', (1187, 1214), True, 'import numpy as np\n'), ((1235, 1277), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': '(batch_size,)'}), '(100, size=(batch_size,))\n', (1252, 1277), True, 'import numpy as np\n'), ((1371, 1387), 'megengine.autodiff.GradManager', 'ad.GradManager', ([], {}), '()\n', (1385, 1387), True, 'import megengine.autodiff as ad\n'), ((1676, 1709), 'megengine.functional.nn.cross_entropy', 'F.nn.cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (1694, 1709), True, 'import megengine.functional as F\n'), ((1758, 1784), 'megengine.get_mem_status_bytes', 'mge.get_mem_status_bytes', ([], {}), '()\n', (1782, 1784), True, 'import megengine as mge\n'), ((1514, 1528), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1526, 1528), False, 'from datetime import datetime\n'), ((1575, 1586), 'time.time', 'time.time', ([], {}), '()\n', (1584, 1586), False, 'import time\n'), ((2152, 2171), 'numpy.array', 'np.array', (['time_list'], {}), '(time_list)\n', (2160, 2171), True, 'import numpy as np\n'), ((1944, 1955), 'time.time', 'time.time', ([], {}), '()\n', (1953, 1955), False, 'import time\n')]
|
import numpy as np
import megengine.functional as F
import megengine.module as M
from config import config
from .anchors_generator import AnchorGenerator
from .find_top_rpn_proposals import find_top_rpn_proposals
from .fpn_anchor_target import fpn_anchor_target, fpn_rpn_reshape
from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn
import pdb
class RPN(M.Module):
def __init__(self, rpn_channel=256):
super().__init__()
self.anchors_generator = AnchorGenerator(
config.anchor_base_size,
config.anchor_aspect_ratios,
config.anchor_base_scale)
self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
self.rpn_cls_score = M.Conv2d(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)
self.rpn_bbox_offsets = M.Conv2d(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)
for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
def forward(self, features, im_info, boxes=None):
# prediction
pred_cls_score_list = []
pred_bbox_offsets_list = []
for x in features:
t = F.relu(self.rpn_conv(x))
pred_cls_score_list.append(self.rpn_cls_score(t))
pred_bbox_offsets_list.append(self.rpn_bbox_offsets(t))
# get anchors
all_anchors_list = []
fm_stride = 2 ** (len(features) + 1)
for fm in features:
layer_anchors = self.anchors_generator(fm, fm_stride)
fm_stride = fm_stride // 2
all_anchors_list.append(layer_anchors)
# sample from the predictions
rpn_rois, rpn_probs = find_top_rpn_proposals(
self.training, pred_bbox_offsets_list, pred_cls_score_list,
all_anchors_list, im_info)
if self.training:
rpn_labels, rpn_bbox_targets = fpn_anchor_target(
boxes, im_info, all_anchors_list)
#rpn_labels = rpn_labels.astype(np.int32)
pred_cls_score, pred_bbox_offsets = fpn_rpn_reshape(
pred_cls_score_list, pred_bbox_offsets_list)
# rpn loss
rpn_cls_loss = softmax_loss(pred_cls_score, rpn_labels)
rpn_bbox_loss = smooth_l1_loss_rpn(pred_bbox_offsets, rpn_bbox_targets, \
rpn_labels, config.rpn_smooth_l1_beta)
loss_dict = {}
loss_dict['loss_rpn_cls'] = rpn_cls_loss
loss_dict['loss_rpn_loc'] = rpn_bbox_loss
return rpn_rois, loss_dict
else:
return rpn_rois
|
[
"megengine.module.init.fill_",
"megengine.module.init.normal_",
"megengine.module.Conv2d"
] |
[((640, 702), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', 'rpn_channel'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, rpn_channel, kernel_size=3, stride=1, padding=1)\n', (648, 702), True, 'import megengine.module as M\n'), ((732, 807), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(config.num_cell_anchors * 2)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, config.num_cell_anchors * 2, kernel_size=1, stride=1)\n', (740, 807), True, 'import megengine.module as M\n'), ((840, 915), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(config.num_cell_anchors * 4)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, config.num_cell_anchors * 4, kernel_size=1, stride=1)\n', (848, 915), True, 'import megengine.module as M\n'), ((1006, 1040), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (1020, 1040), True, 'import megengine.module as M\n'), ((1053, 1076), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1065, 1076), True, 'import megengine.module as M\n'), ((2286, 2326), 'det_opr.loss_opr.softmax_loss', 'softmax_loss', (['pred_cls_score', 'rpn_labels'], {}), '(pred_cls_score, rpn_labels)\n', (2298, 2326), False, 'from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn\n'), ((2355, 2454), 'det_opr.loss_opr.smooth_l1_loss_rpn', 'smooth_l1_loss_rpn', (['pred_bbox_offsets', 'rpn_bbox_targets', 'rpn_labels', 'config.rpn_smooth_l1_beta'], {}), '(pred_bbox_offsets, rpn_bbox_targets, rpn_labels, config.\n rpn_smooth_l1_beta)\n', (2373, 2454), False, 'from det_opr.loss_opr import softmax_loss, smooth_l1_loss_rpn\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64
data_shape = (batch_size, 2)
label_shape = (batch_size,)
def minibatch_generator():
while True:
inp_data = np.zeros((batch_size, 2))
label = np.zeros(batch_size, dtype=np.int32)
for i in range(batch_size):
# [x0, x1], sampled from U[-1, 1]
inp_data[i, :] = np.random.rand(2) * 2 - 1
label[i] = 0 if np.prod(inp_data[i]) < 0 else 1
yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
def __init__(self):
self.mid_layers = 14
self.num_class = 2
super().__init__()
self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc0.bias)
self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc1.bias)
self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
init.zeros_(self.fc2.bias)
def forward(self, x):
x = self.fc0(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc1(x)
x = relu(x) # Should use tanh but it's not stable now.
x = self.fc2(x)
return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
def step(data, label):
opt.zero_grad()
data_inp.set_value(data)
label_inp.set_value(label)
pred = net(data_inp)
loss = cross_entropy_with_softmax(pred, label_inp)
opt.backward(loss)
opt.step()
return loss.numpy()[0]
return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
data = tensor(np.zeros(data_shape), dtype=np.float32)
label = tensor(np.zeros(label_shape), dtype=np.int32)
opt = opt_factory(net)
# Save state to reset parameters later.
state = copy.deepcopy(net.state_dict())
# Evaluate network in eager mode once.
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
grads = opt.backward(loss)
f = mge.graph.compile(loss, grads)
def step(data, label):
opt.zero_grad()
out = f(data=data, label=label)
opt.step()
loss = out[0][0]
return loss
# Reset parameters.
net.load_state_dict(state)
return step
def generate_trace_step(
net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
opt = opt_factory(net)
@trace
def train(data, label):
pred = net(data)
loss = cross_entropy_with_softmax(pred, label)
opt.zero_grad()
opt.backward(loss)
return loss
train.enabled = enable
def step(data, label):
out = train(data, label)
opt.step()
loss = out[0][0]
return loss
return step
def assert_network_equvilence(nets):
net_state = [net.state_dict() for net in nets]
for state in net_state[1:]:
assert len(net_state[0]) == len(state)
for k, v in net_state[0].items():
for state in net_state[1:]:
assert k in state
assertTensorClose(v, state[k])
@pytest.mark.slow
def test_eager_equvilence():
eager_net = SimpleNet()
trace_enable_net = copy.deepcopy(eager_net)
trace_disable_net = copy.deepcopy(eager_net)
opt_factory = lambda net: SGD(
net.parameters(requires_grad=True), lr=0.01, momentum=0.01
)
estep = generate_eager_step(eager_net, opt_factory)
te_step = generate_trace_step(trace_enable_net, opt_factory, True)
td_step = generate_trace_step(trace_disable_net, opt_factory, False)
assert_network_equvilence([eager_net, trace_enable_net, trace_disable_net])
# Use hard code number as limit, may increase if needed.
for data, label in itertools.islice(minibatch_generator(), 200):
eloss = estep(data, label)
te_loss = te_step(data, label)
td_loss = td_step(data, label)
assertTensorClose(eloss, te_loss)
assertTensorClose(eloss, td_loss)
assert_network_equvilence(
[eager_net, trace_enable_net, trace_disable_net,]
)
|
[
"megengine.test.assertTensorClose",
"megengine.graph.compile",
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.module.Linear",
"megengine.module.init.zeros_",
"megengine.functional.cross_entropy_with_softmax",
"megengine.functional.relu"
] |
[((3348, 3387), 'megengine.functional.cross_entropy_with_softmax', 'cross_entropy_with_softmax', (['pred', 'label'], {}), '(pred, label)\n', (3374, 3387), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((3448, 3478), 'megengine.graph.compile', 'mge.graph.compile', (['loss', 'grads'], {}), '(loss, grads)\n', (3465, 3478), True, 'import megengine as mge\n'), ((4617, 4641), 'copy.deepcopy', 'copy.deepcopy', (['eager_net'], {}), '(eager_net)\n', (4630, 4641), False, 'import copy\n'), ((4666, 4690), 'copy.deepcopy', 'copy.deepcopy', (['eager_net'], {}), '(eager_net)\n', (4679, 4690), False, 'import copy\n'), ((946, 971), 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), '((batch_size, 2))\n', (954, 971), True, 'import numpy as np\n'), ((988, 1024), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.int32'}), '(batch_size, dtype=np.int32)\n', (996, 1024), True, 'import numpy as np\n'), ((1444, 1494), 'megengine.module.Linear', 'Linear', (['self.num_class', 'self.mid_layers'], {'bias': '(True)'}), '(self.num_class, self.mid_layers, bias=True)\n', (1450, 1494), False, 'from megengine.module import Linear, Module\n'), ((1515, 1565), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc0.weight'], {}), '(self.fc0.weight)\n', (1548, 1565), True, 'import megengine.module.init as init\n'), ((1646, 1672), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc0.bias'], {}), '(self.fc0.bias)\n', (1657, 1672), True, 'import megengine.module.init as init\n'), ((1693, 1744), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.mid_layers'], {'bias': '(True)'}), '(self.mid_layers, self.mid_layers, bias=True)\n', (1699, 1744), False, 'from megengine.module import Linear, Module\n'), ((1765, 1815), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (1798, 1815), True, 'import 
megengine.module.init as init\n'), ((1896, 1922), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc1.bias'], {}), '(self.fc1.bias)\n', (1907, 1922), True, 'import megengine.module.init as init\n'), ((1943, 1993), 'megengine.module.Linear', 'Linear', (['self.mid_layers', 'self.num_class'], {'bias': '(True)'}), '(self.mid_layers, self.num_class, bias=True)\n', (1949, 1993), False, 'from megengine.module import Linear, Module\n'), ((2014, 2064), 'megengine.module.init.calculate_fan_in_and_fan_out', 'init.calculate_fan_in_and_fan_out', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (2047, 2064), True, 'import megengine.module.init as init\n'), ((2145, 2171), 'megengine.module.init.zeros_', 'init.zeros_', (['self.fc2.bias'], {}), '(self.fc2.bias)\n', (2156, 2171), True, 'import megengine.module.init as init\n'), ((2235, 2242), 'megengine.functional.relu', 'relu', (['x'], {}), '(x)\n', (2239, 2242), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((2323, 2330), 'megengine.functional.relu', 'relu', (['x'], {}), '(x)\n', (2327, 2330), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((2522, 2542), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (2530, 2542), True, 'import numpy as np\n'), ((2585, 2606), 'numpy.zeros', 'np.zeros', (['label_shape'], {}), '(label_shape)\n', (2593, 2606), True, 'import numpy as np\n'), ((2816, 2859), 'megengine.functional.cross_entropy_with_softmax', 'cross_entropy_with_softmax', (['pred', 'label_inp'], {}), '(pred, label_inp)\n', (2842, 2859), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((3058, 3078), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (3066, 3078), True, 'import numpy as np\n'), ((3117, 3138), 'numpy.zeros', 'np.zeros', (['label_shape'], {}), '(label_shape)\n', (3125, 3138), True, 'import numpy as np\n'), ((3918, 3957), 'megengine.functional.cross_entropy_with_softmax', 
'cross_entropy_with_softmax', (['pred', 'label'], {}), '(pred, label)\n', (3944, 3957), False, 'from megengine.functional import cross_entropy_with_softmax, relu\n'), ((5335, 5368), 'megengine.test.assertTensorClose', 'assertTensorClose', (['eloss', 'te_loss'], {}), '(eloss, te_loss)\n', (5352, 5368), False, 'from megengine.test import assertTensorClose\n'), ((5377, 5410), 'megengine.test.assertTensorClose', 'assertTensorClose', (['eloss', 'td_loss'], {}), '(eloss, td_loss)\n', (5394, 5410), False, 'from megengine.test import assertTensorClose\n'), ((4486, 4516), 'megengine.test.assertTensorClose', 'assertTensorClose', (['v', 'state[k]'], {}), '(v, state[k])\n', (4503, 4516), False, 'from megengine.test import assertTensorClose\n'), ((1137, 1154), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1151, 1154), True, 'import numpy as np\n'), ((1191, 1211), 'numpy.prod', 'np.prod', (['inp_data[i]'], {}), '(inp_data[i])\n', (1198, 1211), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import megengine as mge
import numpy as np
from megengine import jit
from ..build import build_and_load
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo Dump")
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument("-c", "--ckpt", default=None, type=str, help="ckpt for eval")
parser.add_argument(
"--dump_path", default="model.mge", help="path to save the dumped model"
)
return parser
def dump_static_graph(model, graph_name="model.mge"):
model.eval()
model.head.decode_in_inference = False
data = mge.Tensor(np.random.random((1, 3, 640, 640)))
@jit.trace(capture_as_const=True)
def pred_func(data):
outputs = model(data)
return outputs
pred_func(data)
pred_func.dump(
graph_name,
arg_names=["data"],
optimize_for_inference=True,
enable_fuse_conv_bias_nonlinearity=True,
)
def main(args):
model = build_and_load(args.ckpt, name=args.name)
dump_static_graph(model, args.dump_path)
if __name__ == "__main__":
args = make_parser().parse_args()
main(args)
|
[
"megengine.jit.trace"
] |
[((252, 294), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""YOLOX Demo Dump"""'], {}), "('YOLOX Demo Dump')\n", (275, 294), False, 'import argparse\n'), ((780, 812), 'megengine.jit.trace', 'jit.trace', ([], {'capture_as_const': '(True)'}), '(capture_as_const=True)\n', (789, 812), False, 'from megengine import jit\n'), ((738, 772), 'numpy.random.random', 'np.random.random', (['(1, 3, 640, 640)'], {}), '((1, 3, 640, 640))\n', (754, 772), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import json
import os
import subprocess
import sys
import time
import numpy as np
from resnet50 import Resnet50
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine._internal.plugin import CompGraphProfiler
from megengine.core import Graph, tensor
from megengine.core.graph import get_default_graph
from megengine.functional.debug_param import (
get_conv_execution_strategy,
set_conv_execution_strategy,
)
from megengine.jit import trace
from megengine.module import BatchNorm2d, Conv2d, Linear, MaxPool2d, Module
from megengine.optimizer import SGD
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "..", "examples"))
def init_profiler(comp_graph=get_default_graph()):
profiler = CompGraphProfiler(comp_graph)
return profiler
def dump_profiler(profiler, filename):
with open(filename, "w") as fout:
json.dump(profiler.get(), fout, indent=2)
def print_gpu_usage():
stdout = subprocess.getoutput("nvidia-smi")
for line in stdout.split("\n"):
for item in line.split(" "):
if "MiB" in item:
print("Finish with GPU Usage", item)
break
def run_perf(
batch_size=64,
warm_up=True,
dump_prof=None,
opt_level=2,
conv_fastrun=False,
run_step=True,
track_bn_stats=True,
warm_up_iter=20,
run_iter=100,
num_gpu=None,
device=0,
server=None,
port=None,
scale_batch_size=False,
eager=False,
):
if conv_fastrun:
set_conv_execution_strategy("PROFILE")
if num_gpu:
dist.init_process_group(args.server, args.port, num_gpu, device, device)
if scale_batch_size:
batch_size = batch_size // num_gpu
print("Run with data parallel, batch size = {} per GPU".format(batch_size))
data = tensor(np.random.randn(batch_size, 3, 224, 224).astype("float32"))
label = tensor(np.random.randint(1000, size=[batch_size,], dtype=np.int32))
net = Resnet50(track_bn_stats=track_bn_stats)
opt = SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
def train_func(data, label):
logits = net(data)
loss = F.cross_entropy_with_softmax(logits, label)
if num_gpu:
loss = loss / num_gpu
opt.zero_grad()
opt.backward(loss)
return loss
train_func = trace(
train_func,
symbolic=(not eager),
opt_level=opt_level,
profiling=not (dump_prof is None),
)
if warm_up:
print("Warm up ...")
for _ in range(warm_up_iter):
opt.zero_grad()
train_func(data, label)
if run_step:
opt.step()
print_gpu_usage()
print("Running train ...")
start = time.time()
for _ in range(run_iter):
opt.zero_grad()
train_func(data, label)
if run_step:
opt.step()
time_used = time.time() - start
if dump_prof:
with open(dump_prof, "w") as fout:
json.dump(train_func.get_profile(), fout, indent=2)
return time_used / run_iter
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Running regression test on Resnet 50",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--batch-size", type=int, default=64, help="batch size ")
parser.add_argument(
"--warm-up", type=str2bool, default=True, help="whether to warm up"
)
parser.add_argument(
"--dump-prof",
type=str,
default=None,
help="pass the json file path to dump the profiling result",
)
parser.add_argument("--opt-level", type=int, default=2, help="graph opt level")
parser.add_argument(
"--conv-fastrun",
type=str2bool,
default=False,
help="whether to use conv fastrun mode",
)
parser.add_argument(
"--run-step",
type=str2bool,
default=True,
help="whether to run optimizer.step()",
)
parser.add_argument(
"--track-bn-stats",
type=str2bool,
default=True,
help="whether to track bn stats",
)
parser.add_argument(
"--warm-up-iter", type=int, default=20, help="number of iters to warm up"
)
parser.add_argument(
"--run-iter", type=int, default=100, help="number of iters to collect wall time"
)
parser.add_argument("--server", default="0.0.0.0")
parser.add_argument("--port", type=int, default=2222)
parser.add_argument(
"--scale-batch-size",
type=str2bool,
default=False,
help="whether to divide batch size by number of GPUs",
)
parser.add_argument(
"--eager", type=str2bool, default=False, help="whether to use eager mode"
)
# Data parallel related
parser.add_argument("--num-gpu", type=int, default=None)
parser.add_argument("--device", type=int, default=0)
args = parser.parse_args()
print(vars(args))
os.environ["MGB_JIT_BACKEND"] = "NVRTC"
t = run_perf(**vars(args))
print("**********************************")
print("Wall time per iter {:.0f} ms".format(t * 1000))
print("**********************************")
get_default_graph().clear_device_memory()
|
[
"megengine.jit.trace",
"megengine.core.graph.get_default_graph",
"megengine.distributed.init_process_group",
"megengine.functional.cross_entropy_with_softmax",
"megengine._internal.plugin.CompGraphProfiler",
"megengine.functional.debug_param.set_conv_execution_strategy"
] |
[((1128, 1147), 'megengine.core.graph.get_default_graph', 'get_default_graph', ([], {}), '()\n', (1145, 1147), False, 'from megengine.core.graph import get_default_graph\n'), ((1165, 1194), 'megengine._internal.plugin.CompGraphProfiler', 'CompGraphProfiler', (['comp_graph'], {}), '(comp_graph)\n', (1182, 1194), False, 'from megengine._internal.plugin import CompGraphProfiler\n'), ((1382, 1416), 'subprocess.getoutput', 'subprocess.getoutput', (['"""nvidia-smi"""'], {}), "('nvidia-smi')\n", (1402, 1416), False, 'import subprocess\n'), ((2401, 2440), 'resnet50.Resnet50', 'Resnet50', ([], {'track_bn_stats': 'track_bn_stats'}), '(track_bn_stats=track_bn_stats)\n', (2409, 2440), False, 'from resnet50 import Resnet50\n'), ((2780, 2876), 'megengine.jit.trace', 'trace', (['train_func'], {'symbolic': '(not eager)', 'opt_level': 'opt_level', 'profiling': '(not dump_prof is None)'}), '(train_func, symbolic=not eager, opt_level=opt_level, profiling=not \n dump_prof is None)\n', (2785, 2876), False, 'from megengine.jit import trace\n'), ((3180, 3191), 'time.time', 'time.time', ([], {}), '()\n', (3189, 3191), False, 'import time\n'), ((3849, 3984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Running regression test on Resnet 50"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Running regression test on Resnet 50',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (3872, 3984), False, 'import argparse\n'), ((1039, 1064), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1054, 1064), False, 'import os\n'), ((1934, 1972), 'megengine.functional.debug_param.set_conv_execution_strategy', 'set_conv_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (1961, 1972), False, 'from megengine.functional.debug_param import get_conv_execution_strategy, set_conv_execution_strategy\n'), ((1998, 2070), 'megengine.distributed.init_process_group', 'dist.init_process_group', 
(['args.server', 'args.port', 'num_gpu', 'device', 'device'], {}), '(args.server, args.port, num_gpu, device, device)\n', (2021, 2070), True, 'import megengine.distributed as dist\n'), ((2329, 2387), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '[batch_size]', 'dtype': 'np.int32'}), '(1000, size=[batch_size], dtype=np.int32)\n', (2346, 2387), True, 'import numpy as np\n'), ((2591, 2634), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {}), '(logits, label)\n', (2619, 2634), True, 'import megengine.functional as F\n'), ((3339, 3350), 'time.time', 'time.time', ([], {}), '()\n', (3348, 3350), False, 'import time\n'), ((3753, 3806), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (3779, 3806), False, 'import argparse\n'), ((5951, 5970), 'megengine.core.graph.get_default_graph', 'get_default_graph', ([], {}), '()\n', (5968, 5970), False, 'from megengine.core.graph import get_default_graph\n'), ((2250, 2290), 'numpy.random.randn', 'np.random.randn', (['batch_size', '(3)', '(224)', '(224)'], {}), '(batch_size, 3, 224, 224)\n', (2265, 2290), True, 'import numpy as np\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine._internal as mgb
from ... import functional as F
from ...core import Parameter
from ..qat import linear as QAT
from .module import QuantizedModule
class Linear(QuantizedModule):
r"""quantized version of :class:`~.qat.linear.Linear`."""
def __init__(
self, dtype: np.dtype = None,
):
super().__init__()
self.weight = None
self.bias = None
self.output_dtype = dtype
def forward(self, inp):
if self.training:
raise ValueError("quantized module only support inference.")
inp_scale = mgb.dtype.get_scale(inp.dtype)
w_scale = mgb.dtype.get_scale(self.weight.dtype)
bias_dtype = mgb.dtype.qint32(inp_scale * w_scale)
return F.linear(
inp,
self.weight,
None if self.bias is None else self.bias.astype(bias_dtype),
).astype(self.output_dtype)
@classmethod
def from_qat_module(cls, qat_module: QAT.Linear):
r"""
return a :class:`~.QuantizedModule` instance converted from a
:class:`~.QATModule` instance.
"""
output_dtype = qat_module.get_activation_dtype()
qmod = cls(dtype=output_dtype)
weight = qat_module.weight.astype(qat_module.get_weight_dtype())
qmod.weight = Parameter(weight.numpy())
if qat_module.bias is not None:
qmod.bias = Parameter(qat_module.bias.numpy())
return qmod
|
[
"megengine._internal.dtype.qint32",
"megengine._internal.dtype.get_scale"
] |
[((958, 988), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['inp.dtype'], {}), '(inp.dtype)\n', (977, 988), True, 'import megengine._internal as mgb\n'), ((1007, 1045), 'megengine._internal.dtype.get_scale', 'mgb.dtype.get_scale', (['self.weight.dtype'], {}), '(self.weight.dtype)\n', (1026, 1045), True, 'import megengine._internal as mgb\n'), ((1067, 1104), 'megengine._internal.dtype.qint32', 'mgb.dtype.qint32', (['(inp_scale * w_scale)'], {}), '(inp_scale * w_scale)\n', (1083, 1104), True, 'import megengine._internal as mgb\n')]
|
import os
import math
import argparse
from multiprocessing import Process, Queue
from tqdm import tqdm
import numpy as np
import megengine as mge
from megengine import jit
from config import config
import network
import dataset
import misc_utils
if_set_nms = True
def eval_all(args):
# model_path
saveDir = config.model_dir
evalDir = config.eval_dir
misc_utils.ensure_dir(evalDir)
model_file = os.path.join(saveDir,
'epoch_{}.pkl'.format(args.resume_weights))
assert os.path.exists(model_file)
# load data
records = misc_utils.load_json_lines(config.eval_source)
# multiprocessing
num_records = len(records)
num_devs = args.devices
num_image = math.ceil(num_records / num_devs)
result_queue = Queue(1000)
procs = []
all_results = []
for i in range(num_devs):
start = i * num_image
end = min(start + num_image, num_records)
split_records = records[start:end]
proc = Process(target=inference, args=(
model_file, i, split_records, result_queue))
proc.start()
procs.append(proc)
pbar = tqdm(total=num_records, ncols=50)
for i in range(num_records):
t = result_queue.get()
all_results.append(t)
pbar.update(1)
for p in procs:
p.join()
fpath = os.path.join(evalDir, 'dump-{}.json'.format(args.resume_weights))
misc_utils.save_json_lines(all_results, fpath)
def inference(model_file, device, records, result_queue):
@jit.trace(symbolic=False)
def val_func():
pred_boxes = net(net.inputs)
return pred_boxes
net = network.Network()
net.eval()
check_point = mge.load(model_file)
net.load_state_dict(check_point['state_dict'])
for record in records:
np.set_printoptions(precision=2, suppress=True)
net.eval()
image, gt_boxes, im_info, ID = get_data(record, device)
net.inputs["image"].set_value(image.astype(np.float32))
net.inputs["im_info"].set_value(im_info)
pred_boxes = val_func().numpy()
# nms
if if_set_nms:
from set_nms_utils import set_cpu_nms
n = pred_boxes.shape[0] // 2
idents = np.tile(np.arange(n)[:,None], (1, 2)).reshape(-1, 1)
pred_boxes = np.hstack((pred_boxes, idents))
keep = pred_boxes[:, -2] > 0.05
pred_boxes = pred_boxes[keep]
keep = set_cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep][:, :-1]
else:
from set_nms_utils import cpu_nms
keep = pred_boxes[:, -1] > 0.05
pred_boxes = pred_boxes[keep]
keep = cpu_nms(pred_boxes, 0.5)
pred_boxes = pred_boxes[keep]
result_dict = dict(ID=ID, height=int(im_info[0, -2]), width=int(im_info[0, -1]),
dtboxes=boxes_dump(pred_boxes, False),
gtboxes=boxes_dump(gt_boxes, True))
result_queue.put_nowait(result_dict)
def boxes_dump(boxes, is_gt):
result = []
boxes = boxes.tolist()
for box in boxes:
if is_gt:
box_dict = {}
box_dict['box'] = [box[0], box[1], box[2]-box[0], box[3]-box[1]]
box_dict['tag'] = box[-1]
else:
box_dict = {}
box_dict['box'] = [box[0], box[1], box[2]-box[0], box[3]-box[1]]
box_dict['tag'] = 1
box_dict['score'] = box[-1]
result.append(box_dict)
return result
def get_data(record, device):
data = dataset.val_dataset(record)
image, gt_boxes, ID = \
data['data'], data['boxes'], data['ID']
if config.eval_resize == False:
resized_img, scale = image, 1
else:
resized_img, scale = dataset.resize_img_by_short_and_max_size(
image, config.eval_image_short_size, config.eval_image_max_size)
original_height, original_width = image.shape[0:2]
height, width = resized_img.shape[0:2]
transposed_img = np.ascontiguousarray(
resized_img.transpose(2, 0, 1)[None, :, :, :],
dtype=np.float32)
im_info = np.array([height, width, scale, original_height, original_width],
dtype=np.float32)[None, :]
return transposed_img, gt_boxes, im_info, ID
def run_test():
parser = argparse.ArgumentParser()
parser.add_argument('--resume_weights', '-r', default=None, type=str)
parser.add_argument('--devices', '-d', default=1, type=int)
args = parser.parse_args()
eval_all(args)
if __name__ == '__main__':
run_test()
|
[
"megengine.jit.trace",
"megengine.load"
] |
[((370, 400), 'misc_utils.ensure_dir', 'misc_utils.ensure_dir', (['evalDir'], {}), '(evalDir)\n', (391, 400), False, 'import misc_utils\n'), ((508, 534), 'os.path.exists', 'os.path.exists', (['model_file'], {}), '(model_file)\n', (522, 534), False, 'import os\n'), ((565, 611), 'misc_utils.load_json_lines', 'misc_utils.load_json_lines', (['config.eval_source'], {}), '(config.eval_source)\n', (591, 611), False, 'import misc_utils\n'), ((709, 742), 'math.ceil', 'math.ceil', (['(num_records / num_devs)'], {}), '(num_records / num_devs)\n', (718, 742), False, 'import math\n'), ((762, 773), 'multiprocessing.Queue', 'Queue', (['(1000)'], {}), '(1000)\n', (767, 773), False, 'from multiprocessing import Process, Queue\n'), ((1131, 1164), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_records', 'ncols': '(50)'}), '(total=num_records, ncols=50)\n', (1135, 1164), False, 'from tqdm import tqdm\n'), ((1401, 1447), 'misc_utils.save_json_lines', 'misc_utils.save_json_lines', (['all_results', 'fpath'], {}), '(all_results, fpath)\n', (1427, 1447), False, 'import misc_utils\n'), ((1512, 1537), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (1521, 1537), False, 'from megengine import jit\n'), ((1631, 1648), 'network.Network', 'network.Network', ([], {}), '()\n', (1646, 1648), False, 'import network\n'), ((1682, 1702), 'megengine.load', 'mge.load', (['model_file'], {}), '(model_file)\n', (1690, 1702), True, 'import megengine as mge\n'), ((3529, 3556), 'dataset.val_dataset', 'dataset.val_dataset', (['record'], {}), '(record)\n', (3548, 3556), False, 'import dataset\n'), ((4290, 4315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4313, 4315), False, 'import argparse\n'), ((978, 1054), 'multiprocessing.Process', 'Process', ([], {'target': 'inference', 'args': '(model_file, i, split_records, result_queue)'}), '(target=inference, args=(model_file, i, split_records, result_queue))\n', (985, 1054), False, 'from multiprocessing 
import Process, Queue\n'), ((1789, 1836), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (1808, 1836), True, 'import numpy as np\n'), ((3754, 3864), 'dataset.resize_img_by_short_and_max_size', 'dataset.resize_img_by_short_and_max_size', (['image', 'config.eval_image_short_size', 'config.eval_image_max_size'], {}), '(image, config.\n eval_image_short_size, config.eval_image_max_size)\n', (3794, 3864), False, 'import dataset\n'), ((4110, 4198), 'numpy.array', 'np.array', (['[height, width, scale, original_height, original_width]'], {'dtype': 'np.float32'}), '([height, width, scale, original_height, original_width], dtype=np.\n float32)\n', (4118, 4198), True, 'import numpy as np\n'), ((2300, 2331), 'numpy.hstack', 'np.hstack', (['(pred_boxes, idents)'], {}), '((pred_boxes, idents))\n', (2309, 2331), True, 'import numpy as np\n'), ((2437, 2465), 'set_nms_utils.set_cpu_nms', 'set_cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (2448, 2465), False, 'from set_nms_utils import set_cpu_nms\n'), ((2685, 2709), 'set_nms_utils.cpu_nms', 'cpu_nms', (['pred_boxes', '(0.5)'], {}), '(pred_boxes, 0.5)\n', (2692, 2709), False, 'from set_nms_utils import cpu_nms\n'), ((2230, 2242), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2239, 2242), True, 'import numpy as np\n')]
|
import megengine as mge
import megengine.module as M
import pytest
from basecls.models.snet import SNV2Block, SNV2XceptionBlock
@pytest.mark.parametrize("w_in", [32, 48])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("w_mid", [32, 24])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("kernel", [3, 5])
@pytest.mark.parametrize("se_r", [0.0, 0.25])
@pytest.mark.parametrize("drop_path_prob", [0.0, 0.1])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_block(
    w_in: int,
    w_out: int,
    w_mid: int,
    *,
    kernel: int,
    stride: int,
    norm_name: str,
    act_name: str,
    se_r: float,
    drop_path_prob: float,
):
    """Build an ``SNV2Block`` over the parameter grid and run one forward pass."""
    block = SNV2Block(
        w_in,
        w_out,
        w_mid,
        kernel=kernel,
        stride=stride,
        norm_name=norm_name,
        act_name=act_name,
        se_r=se_r,
        drop_path_prob=drop_path_prob,
    )
    assert isinstance(block, M.Module)
    # Dummy NCHW input; channel count scales with stride as in the original test.
    dummy_input = mge.random.normal(size=(2, w_in * 2 // stride, 8, 8))
    block(dummy_input)
@pytest.mark.parametrize("w_in", [32])
@pytest.mark.parametrize("w_out", [64])
@pytest.mark.parametrize("w_mid", [32])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("kernel", [7, "x"])
@pytest.mark.parametrize("se_r", [0.25])
@pytest.mark.parametrize("drop_path_prob", [0.1])
@pytest.mark.parametrize("norm_name", ["BN"])
@pytest.mark.parametrize("act_name", ["relu"])
def test_x_block(
    w_in: int,
    w_out: int,
    w_mid: int,
    *,
    kernel: int,
    stride: int,
    norm_name: str,
    act_name: str,
    se_r: float,
    drop_path_prob: float,
):
    """Build an ``SNV2XceptionBlock`` (including the ``"x"`` kernel variant) and run it once."""
    block = SNV2XceptionBlock(
        w_in,
        w_out,
        w_mid,
        kernel=kernel,
        stride=stride,
        norm_name=norm_name,
        act_name=act_name,
        se_r=se_r,
        drop_path_prob=drop_path_prob,
    )
    assert isinstance(block, M.Module)
    # Dummy NCHW input; channel count scales with stride as in the original test.
    dummy_input = mge.random.normal(size=(2, w_in * 2 // stride, 8, 8))
    block(dummy_input)
|
[
"megengine.random.normal"
] |
[((132, 173), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32, 48]'], {}), "('w_in', [32, 48])\n", (155, 173), False, 'import pytest\n'), ((175, 213), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[64]'], {}), "('w_out', [64])\n", (198, 213), False, 'import pytest\n'), ((215, 257), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_mid"""', '[32, 24]'], {}), "('w_mid', [32, 24])\n", (238, 257), False, 'import pytest\n'), ((259, 300), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (282, 300), False, 'import pytest\n'), ((302, 343), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kernel"""', '[3, 5]'], {}), "('kernel', [3, 5])\n", (325, 343), False, 'import pytest\n'), ((345, 389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.0, 0.25]'], {}), "('se_r', [0.0, 0.25])\n", (368, 389), False, 'import pytest\n'), ((391, 444), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[0.0, 0.1]'], {}), "('drop_path_prob', [0.0, 0.1])\n", (414, 444), False, 'import pytest\n'), ((446, 490), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (469, 490), False, 'import pytest\n'), ((492, 537), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (515, 537), False, 'import pytest\n'), ((1057, 1094), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_in"""', '[32]'], {}), "('w_in', [32])\n", (1080, 1094), False, 'import pytest\n'), ((1096, 1134), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_out"""', '[64]'], {}), "('w_out', [64])\n", (1119, 1134), False, 'import pytest\n'), ((1136, 1174), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_mid"""', '[32]'], {}), "('w_mid', [32])\n", (1159, 1174), False, 'import 
pytest\n'), ((1176, 1217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stride"""', '[1, 2]'], {}), "('stride', [1, 2])\n", (1199, 1217), False, 'import pytest\n'), ((1219, 1262), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kernel"""', "[7, 'x']"], {}), "('kernel', [7, 'x'])\n", (1242, 1262), False, 'import pytest\n'), ((1264, 1303), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""se_r"""', '[0.25]'], {}), "('se_r', [0.25])\n", (1287, 1303), False, 'import pytest\n'), ((1305, 1353), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""drop_path_prob"""', '[0.1]'], {}), "('drop_path_prob', [0.1])\n", (1328, 1353), False, 'import pytest\n'), ((1355, 1399), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""norm_name"""', "['BN']"], {}), "('norm_name', ['BN'])\n", (1378, 1399), False, 'import pytest\n'), ((1401, 1446), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""act_name"""', "['relu']"], {}), "('act_name', ['relu'])\n", (1424, 1446), False, 'import pytest\n'), ((736, 882), 'basecls.models.snet.SNV2Block', 'SNV2Block', (['w_in', 'w_out', 'w_mid'], {'kernel': 'kernel', 'stride': 'stride', 'norm_name': 'norm_name', 'act_name': 'act_name', 'se_r': 'se_r', 'drop_path_prob': 'drop_path_prob'}), '(w_in, w_out, w_mid, kernel=kernel, stride=stride, norm_name=\n norm_name, act_name=act_name, se_r=se_r, drop_path_prob=drop_path_prob)\n', (745, 882), False, 'from basecls.models.snet import SNV2Block, SNV2XceptionBlock\n'), ((1647, 1805), 'basecls.models.snet.SNV2XceptionBlock', 'SNV2XceptionBlock', (['w_in', 'w_out', 'w_mid'], {'kernel': 'kernel', 'stride': 'stride', 'norm_name': 'norm_name', 'act_name': 'act_name', 'se_r': 'se_r', 'drop_path_prob': 'drop_path_prob'}), '(w_in, w_out, w_mid, kernel=kernel, stride=stride,\n norm_name=norm_name, act_name=act_name, se_r=se_r, drop_path_prob=\n drop_path_prob)\n', (1664, 1805), False, 'from basecls.models.snet import SNV2Block, SNV2XceptionBlock\n'), ((999, 
1052), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, w_in * 2 // stride, 8, 8)'}), '(size=(2, w_in * 2 // stride, 8, 8))\n', (1016, 1052), True, 'import megengine as mge\n'), ((1918, 1971), 'megengine.random.normal', 'mge.random.normal', ([], {'size': '(2, w_in * 2 // stride, 8, 8)'}), '(size=(2, w_in * 2 // stride, 8, 8))\n', (1935, 1971), True, 'import megengine as mge\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import megengine as mge
import megengine.module as M
import numpy as np
import pytest
import torch
import torch.nn as nn
from basecls.configs import BaseConfig
from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss
@pytest.mark.parametrize("name", [CrossEntropy, "BinaryCrossEntropy", "CrossEntropy"])
def test_build_loss(name):
    """``build_loss`` should accept both a class object and a registry name."""
    cfg = BaseConfig(loss=dict(name=name))
    loss_module = build_loss(cfg)
    assert isinstance(loss_module, M.Module)
def test_bce():
    """MegEngine ``BinaryCrossEntropy`` must match torch ``BCEWithLogitsLoss``."""
    logits = np.random.rand(2, 8, 4).astype("float32")
    targets = np.random.rand(2, 8, 4).astype("float32")
    mge_loss = BinaryCrossEntropy()(mge.Tensor(logits), mge.Tensor(targets)).numpy()
    ref_loss = nn.BCEWithLogitsLoss()(torch.tensor(logits), torch.tensor(targets)).numpy()
    np.testing.assert_allclose(mge_loss, ref_loss, rtol=1e-4, atol=1e-6)
def test_ce():
    """MegEngine ``CrossEntropy`` must match torch, and index vs. one-hot
    targets must agree, with and without label smoothing."""
    K = 4
    logits = np.random.rand(2, 8, K).astype("float32")
    labels = np.random.randint(K, size=(2, 8)).astype("int32")
    onehot = np.eye(K, dtype="int32")[labels]
    mge_loss = CrossEntropy(axis=2)(mge.Tensor(logits), mge.Tensor(labels)).numpy()
    ref_loss = nn.CrossEntropyLoss()(
        torch.tensor(logits).reshape(-1, K), torch.tensor(labels).flatten().long()
    ).numpy()
    np.testing.assert_allclose(mge_loss, ref_loss, rtol=1e-4, atol=1e-6)
    # one-hot targets must give the same loss as index targets
    onehot_loss = CrossEntropy(axis=2)(mge.Tensor(logits), mge.Tensor(onehot)).numpy()
    np.testing.assert_allclose(mge_loss, onehot_loss, rtol=1e-4, atol=1e-6)
    # label smoothing: index and one-hot targets must still agree
    smooth_loss = CrossEntropy(axis=2, label_smooth=0.1)(mge.Tensor(logits), mge.Tensor(labels)).numpy()
    smooth_onehot = CrossEntropy(axis=2, label_smooth=0.1)(mge.Tensor(logits), mge.Tensor(onehot)).numpy()
    np.testing.assert_allclose(smooth_loss, smooth_onehot, rtol=1e-4, atol=1e-6)
|
[
"megengine.Tensor"
] |
[((318, 407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "[CrossEntropy, 'BinaryCrossEntropy', 'CrossEntropy']"], {}), "('name', [CrossEntropy, 'BinaryCrossEntropy',\n 'CrossEntropy'])\n", (341, 407), False, 'import pytest\n'), ((483, 498), 'basecls.layers.build_loss', 'build_loss', (['cfg'], {}), '(cfg)\n', (493, 498), False, 'from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((799, 858), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ml', 'tl'], {'rtol': '(0.0001)', 'atol': '(1e-06)'}), '(ml, tl, rtol=0.0001, atol=1e-06)\n', (825, 858), True, 'import numpy as np\n'), ((1221, 1280), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ml', 'tl'], {'rtol': '(0.0001)', 'atol': '(1e-06)'}), '(ml, tl, rtol=0.0001, atol=1e-06)\n', (1247, 1280), True, 'import numpy as np\n'), ((1366, 1425), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ml', 'ol'], {'rtol': '(0.0001)', 'atol': '(1e-06)'}), '(ml, ol, rtol=0.0001, atol=1e-06)\n', (1392, 1425), True, 'import numpy as np\n'), ((1623, 1682), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ml', 'ol'], {'rtol': '(0.0001)', 'atol': '(1e-06)'}), '(ml, ol, rtol=0.0001, atol=1e-06)\n', (1649, 1682), True, 'import numpy as np\n'), ((1001, 1025), 'numpy.eye', 'np.eye', (['K'], {'dtype': '"""int32"""'}), "(K, dtype='int32')\n", (1007, 1025), True, 'import numpy as np\n'), ((560, 583), 'numpy.random.rand', 'np.random.rand', (['(2)', '(8)', '(4)'], {}), '(2, 8, 4)\n', (574, 583), True, 'import numpy as np\n'), ((610, 633), 'numpy.random.rand', 'np.random.rand', (['(2)', '(8)', '(4)'], {}), '(2, 8, 4)\n', (624, 633), True, 'import numpy as np\n'), ((892, 915), 'numpy.random.rand', 'np.random.rand', (['(2)', '(8)', 'K'], {}), '(2, 8, K)\n', (906, 915), True, 'import numpy as np\n'), ((942, 975), 'numpy.random.randint', 'np.random.randint', (['K'], {'size': '(2, 8)'}), '(K, size=(2, 8))\n', (959, 975), True, 'import 
numpy as np\n'), ((662, 682), 'basecls.layers.BinaryCrossEntropy', 'BinaryCrossEntropy', ([], {}), '()\n', (680, 682), False, 'from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((683, 696), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (693, 696), True, 'import megengine as mge\n'), ((698, 711), 'megengine.Tensor', 'mge.Tensor', (['y'], {}), '(y)\n', (708, 711), True, 'import megengine as mge\n'), ((730, 752), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (750, 752), True, 'import torch.nn as nn\n'), ((753, 768), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (765, 768), False, 'import torch\n'), ((770, 785), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (782, 785), False, 'import torch\n'), ((1039, 1059), 'basecls.layers.CrossEntropy', 'CrossEntropy', ([], {'axis': '(2)'}), '(axis=2)\n', (1051, 1059), False, 'from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((1060, 1073), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1070, 1073), True, 'import megengine as mge\n'), ((1075, 1088), 'megengine.Tensor', 'mge.Tensor', (['y'], {}), '(y)\n', (1085, 1088), True, 'import megengine as mge\n'), ((1107, 1128), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1126, 1128), True, 'import torch.nn as nn\n'), ((1302, 1322), 'basecls.layers.CrossEntropy', 'CrossEntropy', ([], {'axis': '(2)'}), '(axis=2)\n', (1314, 1322), False, 'from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((1323, 1336), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1333, 1336), True, 'import megengine as mge\n'), ((1338, 1352), 'megengine.Tensor', 'mge.Tensor', (['oy'], {}), '(oy)\n', (1348, 1352), True, 'import megengine as mge\n'), ((1455, 1493), 'basecls.layers.CrossEntropy', 'CrossEntropy', ([], {'axis': '(2)', 'label_smooth': '(0.1)'}), '(axis=2, label_smooth=0.1)\n', (1467, 1493), False, 'from basecls.layers import 
BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((1494, 1507), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1504, 1507), True, 'import megengine as mge\n'), ((1509, 1522), 'megengine.Tensor', 'mge.Tensor', (['y'], {}), '(y)\n', (1519, 1522), True, 'import megengine as mge\n'), ((1541, 1579), 'basecls.layers.CrossEntropy', 'CrossEntropy', ([], {'axis': '(2)', 'label_smooth': '(0.1)'}), '(axis=2, label_smooth=0.1)\n', (1553, 1579), False, 'from basecls.layers import BinaryCrossEntropy, CrossEntropy, build_loss\n'), ((1580, 1593), 'megengine.Tensor', 'mge.Tensor', (['x'], {}), '(x)\n', (1590, 1593), True, 'import megengine as mge\n'), ((1595, 1609), 'megengine.Tensor', 'mge.Tensor', (['oy'], {}), '(oy)\n', (1605, 1609), True, 'import megengine as mge\n'), ((1138, 1153), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1150, 1153), False, 'import torch\n'), ((1170, 1185), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (1182, 1185), False, 'import torch\n')]
|
import io
import numpy as np
import megengine.core.tensor.megbrain_graph as G
import megengine.utils.comp_graph_tools as cgtools
from megengine import tensor
from megengine.jit import trace
from megengine.utils.network_node import VarNode
def _default_compare_fn(x, y):
if isinstance(x, np.ndarray):
np.testing.assert_allclose(x, y, rtol=1e-6)
else:
np.testing.assert_allclose(x.numpy(), y, rtol=1e-6)
def make_tensor(x, network=None, device=None):
    """Wrap ``x`` for use in a test: an eager tensor when no network is
    given, otherwise a constant (or re-wrapped ``VarNode``) of ``network``."""
    if network is None:
        return tensor(x, device=device)
    if isinstance(x, VarNode):
        return VarNode(x.var)
    return network.make_const(x, device=device)
def opr_test(
    cases,
    func,
    compare_fn=_default_compare_fn,
    ref_fn=None,
    test_trace=True,
    network=None,
    **kwargs
):
    """
    Run ``func`` on the first case and compare against the expected output,
    optionally also under ``jit.trace`` (non-symbolic and symbolic) and
    through a dump / ``GraphInference`` reload round trip.

    :param cases: the list which have dict element, the list length should be 2 for dynamic shape test.
        and the dict should have input,
        and should have output if ref_fn is None.
        should use list for multiple inputs and outputs for each case.
    :param func: the function to run opr.
    :param compare_fn: the function to compare the result and expected, use
        ``np.testing.assert_allclose`` if None.
    :param ref_fn: the function to generate expected data, should assign output if None.
    :param test_trace: when True (and ``network`` is None), additionally run
        ``func`` traced and through a serialized-graph reload.
    :param network: if given, inputs are made as constants of this network
        via ``make_tensor`` and the tracing path is skipped.
    :param kwargs: extra keyword arguments forwarded to ``func``.
    Examples:
    .. code-block::
        dtype = np.float32
        cases = [{"input": [10, 20]}, {"input": [20, 30]}]
        opr_test(cases,
                 F.eye,
                 ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
                 dtype=dtype)
    """
    # Compare each produced result with its expected value; plain values
    # (e.g. numpy arrays) are wrapped into tensors before comparing.
    def check_results(results, expected):
        if not isinstance(results, (tuple, list)):
            results = (results,)
        for r, e in zip(results, expected):
            if not isinstance(r, (tensor, VarNode)):
                r = tensor(r)
            compare_fn(r, e)
    # Extract (inputs, outputs) from cases[idx]; outputs may instead be
    # computed by ref_fn when one is supplied.
    def get_param(cases, idx):
        case = cases[idx]
        inp = case.get("input", None)
        outp = case.get("output", None)
        if inp is None:
            raise ValueError("the test case should have input")
        if not isinstance(inp, (tuple, list)):
            inp = (inp,)
        if ref_fn is not None and callable(ref_fn):
            outp = ref_fn(*inp)
        if outp is None:
            raise ValueError("the test case should have output or reference function")
        if not isinstance(outp, (tuple, list)):
            outp = (outp,)
        return inp, outp
    if len(cases) == 0:
        raise ValueError("should give one case at least")
    if not callable(func):
        raise ValueError("the input func should be callable")
    # NOTE(review): only cases[0] is exercised below — the "length 2 for
    # dynamic shape test" mentioned in the docstring is not implemented here.
    inp, outp = get_param(cases, 0)
    inp_tensor = [make_tensor(inpi, network) for inpi in inp]
    if test_trace and not network:
        copied_inp = inp_tensor.copy()
        # Verify both non-symbolic and symbolic tracing agree with the
        # expected outputs; run three times to exercise trace reuse.
        for symbolic in [False, True]:
            traced_func = trace(symbolic=symbolic)(func)
            for _ in range(3):
                traced_results = traced_func(*copied_inp, **kwargs)
                check_results(traced_results, outp)
        # Dump the traced graph and run it again through GraphInference to
        # verify the serialized graph produces the same outputs.
        dumped_func = trace(symbolic=True, capture_as_const=True)(func)
        dumped_results = dumped_func(*copied_inp, **kwargs)
        check_results(dumped_results, outp)
        file = io.BytesIO()
        dump_info = dumped_func.dump(file)
        file.seek(0)
        # arg_name has pattern arg_xxx, xxx is int value
        def take_number(arg_name):
            return int(arg_name.split("_")[-1])
        input_names = dump_info[4]
        inps_np = [i.numpy() for i in copied_inp]
        # Feed inputs in the order the dumped graph declares them (arg index).
        input_names.sort(key=take_number)
        inp_dict = dict(zip(input_names, inps_np))
        infer_cg = cgtools.GraphInference(file)
        # assume #outputs == 1
        loaded_results = list(infer_cg.run(inp_dict=inp_dict).values())[0]
        check_results(loaded_results, outp)
    results = func(*inp_tensor, **kwargs)
    check_results(results, outp)
|
[
"megengine.jit.trace",
"megengine.tensor",
"megengine.utils.comp_graph_tools.GraphInference",
"megengine.utils.network_node.VarNode"
] |
[((316, 360), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x', 'y'], {'rtol': '(1e-06)'}), '(x, y, rtol=1e-06)\n', (342, 360), True, 'import numpy as np\n'), ((653, 677), 'megengine.tensor', 'tensor', (['x'], {'device': 'device'}), '(x, device=device)\n', (659, 677), False, 'from megengine import tensor\n'), ((3295, 3307), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3305, 3307), False, 'import io\n'), ((3711, 3739), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['file'], {}), '(file)\n', (3733, 3739), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((561, 575), 'megengine.utils.network_node.VarNode', 'VarNode', (['x.var'], {}), '(x.var)\n', (568, 575), False, 'from megengine.utils.network_node import VarNode\n'), ((3125, 3168), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (3130, 3168), False, 'from megengine.jit import trace\n'), ((1879, 1888), 'megengine.tensor', 'tensor', (['r'], {}), '(r)\n', (1885, 1888), False, 'from megengine import tensor\n'), ((2923, 2947), 'megengine.jit.trace', 'trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (2928, 2947), False, 'from megengine.jit import trace\n')]
|
import numpy as np
import megengine
import megengine.module as M
import megengine.functional as F
from edit.models.common import ShuffleV2Block, CoordAtt
import math
from . import default_init_weights
class MobileNeXt(M.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3):
        """Residual block with coordinate attention applied right after the
        first depthwise convolution (default placement), following
        https://github.com/Andrew-Qibin/CoordAttention/blob/main/coordatt.py
        """
        super(MobileNeXt, self).__init__()
        pad = kernel_size // 2
        # depthwise conv + ReLU -> coordinate attention -> two 1x1 convs -> depthwise conv
        self.dconv1 = M.ConvRelu2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=pad, groups=in_channels)
        self.CA = CoordAtt(inp=out_channels, oup=out_channels)
        self.conv1 = M.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.conv2 = M.ConvRelu2d(out_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.dconv2 = M.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=pad, groups=out_channels)
        self.init_weights()

    def init_weights(self):
        # Re-initialize all convs with scale=0.1 (same scheme as ResBlock).
        for conv in (self.conv1, self.conv2, self.dconv1, self.dconv2):
            default_init_weights(conv, scale=0.1)

    def forward(self, x):
        # Residual branch: dconv1 -> CA -> conv1 -> conv2 -> dconv2.
        branch = self.dconv1(x)
        branch = self.CA(branch)
        branch = self.conv1(branch)
        branch = self.conv2(branch)
        branch = self.dconv2(branch)
        return x + branch
class ResBlock(M.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3):
        """Two-conv residual block: conv+ReLU followed by a plain conv."""
        super(ResBlock, self).__init__()
        pad = kernel_size // 2
        self.conv1 = M.ConvRelu2d(in_channels, out_channels, kernel_size=kernel_size, stride=1, padding=pad)
        self.conv2 = M.Conv2d(out_channels, out_channels, kernel_size=kernel_size, stride=1, padding=pad)
        self.init_weights()

    def init_weights(self):
        # Re-initialize both convs with scale=0.1.
        for conv in (self.conv1, self.conv2):
            default_init_weights(conv, scale=0.1)

    def forward(self, x):
        return x + self.conv2(self.conv1(x))
class ResBlocks(M.Module):
    def __init__(self, channel_num, resblock_num, kernel_size=3, blocktype="resblock"):
        """Stack ``resblock_num`` blocks of the chosen type at a fixed width.

        ``blocktype`` must be one of "resblock", "shuffleblock", "MobileNeXt".
        """
        super(ResBlocks, self).__init__()
        assert blocktype in ("resblock", "shuffleblock", "MobileNeXt")
        # Dispatch table instead of an if/elif chain; the outer Sequential
        # wrapper is kept so parameter names match existing checkpoints.
        makers = {
            "resblock": self.make_resblock_layer,
            "shuffleblock": self.make_shuffleblock_layer,
            "MobileNeXt": self.make_MobileNeXt_layer,
        }
        self.model = M.Sequential(
            makers[blocktype](channel_num, resblock_num, kernel_size)
        )

    def make_MobileNeXt_layer(self, ch_out, num_blocks, kernel_size):
        # num_blocks MobileNeXt blocks, all at the same channel width.
        return M.Sequential(
            *[MobileNeXt(ch_out, ch_out, kernel_size) for _ in range(num_blocks)]
        )

    def make_resblock_layer(self, ch_out, num_blocks, kernel_size):
        # num_blocks plain residual blocks.
        return M.Sequential(
            *[ResBlock(ch_out, ch_out, kernel_size) for _ in range(num_blocks)]
        )

    def make_shuffleblock_layer(self, ch_out, num_blocks, kernel_size):
        # num_blocks stride-1 ShuffleNetV2 blocks with half-width splits.
        blocks = [
            ShuffleV2Block(inp=ch_out // 2, oup=ch_out, mid_channels=ch_out // 2, ksize=kernel_size, stride=1)
            for _ in range(num_blocks)
        ]
        return M.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
|
[
"megengine.module.ConvRelu2d",
"megengine.module.Sequential",
"megengine.module.Conv2d"
] |
[((515, 639), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)', 'groups': 'in_channels'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2, groups=in_channels)\n', (527, 639), True, 'import megengine.module as M\n'), ((654, 698), 'edit.models.common.CoordAtt', 'CoordAtt', ([], {'inp': 'out_channels', 'oup': 'out_channels'}), '(inp=out_channels, oup=out_channels)\n', (662, 698), False, 'from edit.models.common import ShuffleV2Block, CoordAtt\n'), ((722, 794), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(out_channels, out_channels, kernel_size=1, stride=1, padding=0)\n', (730, 794), True, 'import megengine.module as M\n'), ((816, 892), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['out_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(out_channels, out_channels, kernel_size=1, stride=1, padding=0)\n', (828, 892), True, 'import megengine.module as M\n'), ((915, 1037), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)', 'groups': 'out_channels'}), '(out_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2, groups=out_channels)\n', (923, 1037), True, 'import megengine.module as M\n'), ((1515, 1619), 'megengine.module.ConvRelu2d', 'M.ConvRelu2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '(kernel_size // 2)'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2)\n', (1527, 1619), True, 'import megengine.module as M\n'), ((1637, 1738), 'megengine.module.Conv2d', 'M.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 
'(kernel_size // 2)'}), '(out_channels, out_channels, kernel_size=kernel_size, stride=1,\n padding=kernel_size // 2)\n', (1645, 1738), True, 'import megengine.module as M\n'), ((3022, 3043), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3034, 3043), True, 'import megengine.module as M\n'), ((3249, 3270), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3261, 3270), True, 'import megengine.module as M\n'), ((3539, 3560), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (3551, 3560), True, 'import megengine.module as M\n'), ((3426, 3529), 'edit.models.common.ShuffleV2Block', 'ShuffleV2Block', ([], {'inp': '(ch_out // 2)', 'oup': 'ch_out', 'mid_channels': '(ch_out // 2)', 'ksize': 'kernel_size', 'stride': '(1)'}), '(inp=ch_out // 2, oup=ch_out, mid_channels=ch_out // 2, ksize\n =kernel_size, stride=1)\n', (3440, 3529), False, 'from edit.models.common import ShuffleV2Block, CoordAtt\n')]
|
import megengine as mge
import megengine.functional as F
from megengine.core import tensor
from layers.nms import gpu_nms
from config import config
from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, \
filter_boxes_opr
def find_top_rpn_proposals(is_train, rpn_bbox_offsets_list, rpn_cls_prob_list,
        all_anchors_list, im_info):
    """Decode RPN outputs into the top scoring proposals for each image.

    For every FPN level, the predicted offsets are (optionally
    de-normalized and) decoded against the anchors, proposals are clipped
    to the image and size-filtered, the ``prev_nms_top_n`` best are kept,
    and a per-image NMS retains at most ``post_nms_top_n`` boxes.

    :param is_train: selects train vs. test pre/post-NMS budgets from config.
    :param rpn_bbox_offsets_list: per-level bbox offset predictions.
    :param rpn_cls_prob_list: per-level 2-class objectness logits.
    :param all_anchors_list: per-level anchors (x0, y0, x1, y1).
    :param im_info: per-image meta info; column 2 is used as the resize scale.
    :return: ``(rois, probs)``; when more than one image is processed, rois
        are prefixed with the batch index and results are concatenated.
    """
    prev_nms_top_n = config.train_prev_nms_top_n \
        if is_train else config.test_prev_nms_top_n
    post_nms_top_n = config.train_post_nms_top_n \
        if is_train else config.test_post_nms_top_n
    batch_per_gpu = config.batch_per_gpu if is_train else 1
    nms_threshold = config.rpn_nms_threshold
    box_min_size = config.rpn_min_box_size
    bbox_normalize_targets = config.rpn_bbox_normalize_targets
    bbox_normalize_means = config.bbox_normalize_means
    bbox_normalize_stds = config.bbox_normalize_stds
    list_size = len(rpn_bbox_offsets_list)
    return_rois = []
    return_probs = []
    for bid in range(batch_per_gpu):
        batch_proposals_list = []
        batch_probs_list = []
        for l in range(list_size):
            # get proposals and probs
            offsets = rpn_bbox_offsets_list[l][bid] \
                .dimshuffle(1, 2, 0).reshape(-1, 4)
            if bbox_normalize_targets:
                # De-normalize the regression targets before decoding.
                # BUGFIX: the original multiplied an undefined name
                # ``pred_offsets`` here, raising NameError whenever
                # rpn_bbox_normalize_targets was enabled.
                std_opr = tensor(bbox_normalize_stds[None, :])
                mean_opr = tensor(bbox_normalize_means[None, :])
                offsets = offsets * std_opr
                offsets = offsets + mean_opr
            all_anchors = all_anchors_list[l]
            proposals = bbox_transform_inv_opr(all_anchors, offsets)
            if config.anchor_within_border:
                proposals = clip_boxes_opr(proposals, im_info[bid, :])
            probs = rpn_cls_prob_list[l][bid] \
                .dimshuffle(1, 2, 0).reshape(-1, 2)
            # foreground probability is channel 1 after softmax
            probs = F.softmax(probs)[:, 1]
            # gather the proposals and probs
            batch_proposals_list.append(proposals)
            batch_probs_list.append(probs)
        batch_proposals = F.concat(batch_proposals_list, axis=0)
        batch_probs = F.concat(batch_probs_list, axis=0)
        # filter the zero boxes (too small after rescaling to input size)
        batch_keep_mask = filter_boxes_opr(
            batch_proposals, box_min_size * im_info[bid, 2])
        batch_probs = batch_probs * batch_keep_mask
        # keep at most prev_nms_top_n proposals before NMS
        num_proposals = F.minimum(prev_nms_top_n, batch_probs.shapeof()[0])
        batch_probs, idx = F.argsort(batch_probs, descending=True)
        batch_probs = batch_probs[:num_proposals].reshape(-1, 1)
        topk_idx = idx[:num_proposals].reshape(-1)
        batch_proposals = batch_proposals.ai[topk_idx]
        batch_rois = F.concat([batch_proposals, batch_probs], axis=1)
        # For each image, run a total-level NMS, and choose topk results.
        keep_inds = gpu_nms(batch_rois, nms_threshold, post_nms_top_n)
        batch_rois = batch_rois.ai[keep_inds]
        batch_probs = batch_rois[:, -1]
        # prepend the batch index so rois from different images stay distinguishable
        batch_inds = mge.ones((batch_rois.shapeof()[0], 1)) * bid
        batch_rois = F.concat([batch_inds, batch_rois[:, :-1]], axis=1)
        return_rois.append(batch_rois)
        return_probs.append(batch_probs)
    if batch_per_gpu == 1:
        return batch_rois, batch_probs
    else:
        concated_rois = F.concat(return_rois, axis=0)
        concated_probs = F.concat(return_probs, axis=0)
        return concated_rois, concated_probs
|
[
"megengine.functional.argsort",
"megengine.functional.concat",
"megengine.functional.softmax",
"megengine.core.tensor"
] |
[((2080, 2118), 'megengine.functional.concat', 'F.concat', (['batch_proposals_list'], {'axis': '(0)'}), '(batch_proposals_list, axis=0)\n', (2088, 2118), True, 'import megengine.functional as F\n'), ((2141, 2175), 'megengine.functional.concat', 'F.concat', (['batch_probs_list'], {'axis': '(0)'}), '(batch_probs_list, axis=0)\n', (2149, 2175), True, 'import megengine.functional as F\n'), ((2235, 2300), 'det_opr.bbox_opr.filter_boxes_opr', 'filter_boxes_opr', (['batch_proposals', '(box_min_size * im_info[bid, 2])'], {}), '(batch_proposals, box_min_size * im_info[bid, 2])\n', (2251, 2300), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((2498, 2537), 'megengine.functional.argsort', 'F.argsort', (['batch_probs'], {'descending': '(True)'}), '(batch_probs, descending=True)\n', (2507, 2537), True, 'import megengine.functional as F\n'), ((2729, 2777), 'megengine.functional.concat', 'F.concat', (['[batch_proposals, batch_probs]'], {'axis': '(1)'}), '([batch_proposals, batch_probs], axis=1)\n', (2737, 2777), True, 'import megengine.functional as F\n'), ((2872, 2922), 'layers.nms.gpu_nms', 'gpu_nms', (['batch_rois', 'nms_threshold', 'post_nms_top_n'], {}), '(batch_rois, nms_threshold, post_nms_top_n)\n', (2879, 2922), False, 'from layers.nms import gpu_nms\n'), ((3120, 3170), 'megengine.functional.concat', 'F.concat', (['[batch_inds, batch_rois[:, :-1]]'], {'axis': '(1)'}), '([batch_inds, batch_rois[:, :-1]], axis=1)\n', (3128, 3170), True, 'import megengine.functional as F\n'), ((3352, 3381), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (3360, 3381), True, 'import megengine.functional as F\n'), ((3407, 3437), 'megengine.functional.concat', 'F.concat', (['return_probs'], {'axis': '(0)'}), '(return_probs, axis=0)\n', (3415, 3437), True, 'import megengine.functional as F\n'), ((1610, 1654), 'det_opr.bbox_opr.bbox_transform_inv_opr', 'bbox_transform_inv_opr', 
(['all_anchors', 'offsets'], {}), '(all_anchors, offsets)\n', (1632, 1654), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((1315, 1358), 'megengine.core.tensor', 'tensor', (['config.bbox_normalize_stds[None, :]'], {}), '(config.bbox_normalize_stds[None, :])\n', (1321, 1358), False, 'from megengine.core import tensor\n'), ((1386, 1430), 'megengine.core.tensor', 'tensor', (['config.bbox_normalize_means[None, :]'], {}), '(config.bbox_normalize_means[None, :])\n', (1392, 1430), False, 'from megengine.core import tensor\n'), ((1727, 1769), 'det_opr.bbox_opr.clip_boxes_opr', 'clip_boxes_opr', (['proposals', 'im_info[bid, :]'], {}), '(proposals, im_info[bid, :])\n', (1741, 1769), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, clip_boxes_opr, filter_boxes_opr\n'), ((1892, 1908), 'megengine.functional.softmax', 'F.softmax', (['probs'], {}), '(probs)\n', (1901, 1908), True, 'import megengine.functional as F\n')]
|
import os, sys
import numpy as np
from config import config
from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr
import megengine as mge
from megengine import functional as F
import pdb
def _compute_center(boxes):
    """Return the (x, y) centers of ``boxes`` given as (x0, y0, x1, y1) rows."""
    cx = (boxes[:, 0] + boxes[:, 2]) * 0.5
    cy = (boxes[:, 1] + boxes[:, 3]) * 0.5
    return F.stack([cx, cy], axis=1)
def _compute_pos_area(gtboxes, ratio = 0.3):
    """Return a box around each gt-box center, sized by ``ratio`` of the
    gt width/height, as (left, top, right, bottom) rows."""
    heights = gtboxes[:, 3] - gtboxes[:, 1]
    widths = gtboxes[:, 2] - gtboxes[:, 0]
    centers = _compute_center(gtboxes)
    left = centers[:, 0] - ratio * widths
    right = centers[:, 0] + ratio * widths
    top = centers[:, 1] - ratio * heights
    bottom = centers[:, 1] + ratio * heights
    return F.stack([left, top, right, bottom], axis = 1)
def _anchor_double_target(gt_boxes, im_info, all_anchors):
    """Assign each anchor its two best-overlapping gt boxes for one image.

    ATSS-style assignment: for every gt, the IoUs of the ``default_num``
    center-closest anchors on each pyramid level are pooled; their
    mean + std becomes that gt's adaptive positive-IoU threshold.
    Candidates must additionally have their center inside a shrunken
    gt area (see ``_compute_pos_area``).

    Args:
        gt_boxes: (G, 5+) boxes; column 4 is the class, < 0 marks ignore.
        im_info: per-image meta; im_info[5] holds the valid gt count.
        all_anchors: (N, 5) anchors; column 4 is the pyramid-level index.

    Returns:
        labels: (2N,) 1 positive / 0 negative / -1 ignore (two slots per anchor).
        bbox_targets: (2N, 4) regression targets w.r.t. the duplicated anchors.
        labels_cat: (2N,) gt class per assignment, -1 where the label is -1.
    """
    gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
    all_anchors = all_anchors.detach()
    # drop padded gt rows; im_info[5] is the number of valid gt boxes
    gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
    # append an all-(-1) dummy gt so every anchor always has a fallback match
    dummy = -F.ones([1, gt_boxes.shape[1]]).to(gt_boxes.device)
    gt_boxes = F.concat([gt_boxes, dummy], axis=0)
    # 1.0 for real gts, 0.0 for ignore/dummy rows (class < 0)
    valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
    anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes)
    # gtboxes_centers = gtboxes_centers * valid_mask.unsqueeze(1)
    gtboxes_centers = gtboxes_centers * F.expand_dims(valid_mask, axis=1)
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    an_centers = F.expand_dims(anchor_centers, axis=1)
    gt_centers = F.expand_dims(gtboxes_centers, axis=0)
    # an_centers = anchor_centers.unsqueeze(1).repeat(1, K, 1)
    # gt_centers = gtboxes_centers.unsqueeze(0).repeat(N, 1, 1)
    # (N, K) center-to-center Euclidean distance between anchors and gts
    distance = F.abs(an_centers - gt_centers)
    distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
    # pyramid levels 0..4 (encoded in all_anchors[:, 4])
    start = 0
    end = 5
    overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps *= F.expand_dims(valid_mask, axis=0)
    default_num = 16
    # per level, gather the IoUs of the default_num closest anchors to each gt
    ious_list = []
    for l in range(start, end):
        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(sorted_index.shape[1], default_num)
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
        ious_list.append(ious)
    ious = F.concat(ious_list, axis=0)
    # adaptive per-gt threshold: mean + std of candidate IoUs, floored at 0.2
    mean_var = F.mean(ious, axis = 0)
    std_var = F.std(ious, 0)
    iou_thresh_per_gt = mean_var + std_var
    iou_thresh_per_gt = F.maximum(iou_thresh_per_gt, 0.2)
    # limits the anchor centers in the gtboxes
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    anchor_points = an_centers
    pos_area = _compute_pos_area(gt_boxes, 0.3)
    # pos_area = pos_area.unsqueeze(0).repeat(N, 1, 1)
    pos_area = F.broadcast_to(F.expand_dims(pos_area, axis=0), (N, K, pos_area.shape[-1]))
    # signed distances from each anchor center to the shrunken-gt borders;
    # all four positive (> 0.1) means the center lies inside the area
    l = anchor_points[:, :, 0] - pos_area[:, :, 0]
    r = pos_area[:, :, 2] - anchor_points[:, :, 0]
    t = anchor_points[:, :, 1] - pos_area[:, :, 1]
    b = pos_area[:, :, 3] - anchor_points[:, :, 1]
    is_in_gt = F.stack([l, r, t, b], axis=2)
    is_in_gt = is_in_gt.min(axis = 2) > 0.1
    valid_mask = (overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)) * is_in_gt.astype(np.float32)
    ious = overlaps * valid_mask
    # keep the top-2 gts per anchor (descending sort), hence 2N output slots
    sorted_index = F.argsort(ious, 1)
    sorted_overlaps = F.gather(ious, 1, sorted_index)
    max_overlaps = sorted_overlaps[:, :2].flatten()
    argmax_overlaps = sorted_index[:, :2].flatten()
    n, c = all_anchors.shape
    device = all_anchors.device
    labels = -F.ones(2 * n).to(device)
    # >= 0.2 -> positive (1); < 0.2 -> negative (0); otherwise stays -1
    positive_mask = (max_overlaps >= 0.2).to(device).astype(np.float32)
    negative_mask = (max_overlaps < 0.2).to(device).astype(np.float32)
    labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
    bbox_targets = gt_boxes[argmax_overlaps, :4]
    # duplicate every anchor twice to line up with the 2N assignments
    all_anchors = F.broadcast_to(F.expand_dims(all_anchors, axis=1), (n,2, c)).reshape(-1, c)
    bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
    # class targets: force -1 wherever the label is -1 (ignore)
    labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(labels, -1).astype(np.float32)
    return labels, bbox_targets, labels_cat
def _anchor_target(gt_boxes, im_info, all_anchors):
    """Assign each anchor its single best gt box for one image (ATSS-style).

    Single-match variant of :func:`_anchor_double_target`: the adaptive
    per-gt IoU threshold (mean + std of the IoUs of the ``default_num``
    center-closest anchors per pyramid level, floored at 0.35) selects
    positives; candidates must have their center inside the gt box.

    Fixed defects of the previous version: it referenced the undefined name
    ``torch`` throughout, read ``level_dist`` before assignment, sorted the
    center distances where its sibling sorts IoUs, and called torch-only
    methods (``unsqueeze``/``eq``/``repeat``) on megengine tensors.

    Args:
        gt_boxes: (G, 5+) boxes; column 4 is the class, < 0 marks ignore.
        im_info: per-image meta; im_info[5] holds the valid gt count.
        all_anchors: (N, 5) anchors; column 4 is the pyramid-level index.

    Returns:
        labels: (N,) 1 positive / 0 negative / -1 ignore.
        bbox_targets: (N, 4) regression targets.
        labels_cat: (N,) gt class per anchor, -1 where ignored.
    """
    gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
    all_anchors = all_anchors.detach()
    # drop padded gt rows; im_info[5] is the number of valid gt boxes
    gt_boxes = gt_boxes[:im_info[5].astype(np.int32), :]
    valid_mask = 1 - (gt_boxes[:, 4] < 0).astype(np.float32)
    anchor_centers = _compute_center(all_anchors)
    gtboxes_centers = _compute_center(gt_boxes) * F.expand_dims(valid_mask, axis=1)
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    an_centers = F.expand_dims(anchor_centers, axis=1)
    gt_centers = F.expand_dims(gtboxes_centers, axis=0)
    # (N, K) center-to-center Euclidean distance
    distance = F.abs(an_centers - gt_centers)
    distance = F.sqrt(F.pow(distance, 2).sum(axis=2))
    # pyramid levels 0..4 (encoded in all_anchors[:, 4])
    start = 0
    end = 5
    overlaps = box_overlap_opr(all_anchors[:, :4], gt_boxes[:, :4])
    overlaps = overlaps * F.expand_dims(valid_mask, axis=0)
    default_num = 9
    # per level, gather the IoUs of the default_num closest anchors to each gt
    ious_list = []
    for l in range(start, end):
        _, index = F.cond_take(all_anchors[:, 4] == l, all_anchors[:, 4])
        level_dist = distance[index, :].transpose(1, 0)
        ious = overlaps[index, :].transpose(1, 0)
        sorted_index = F.argsort(level_dist, descending=False)
        n = min(sorted_index.shape[1], default_num)
        ious = F.gather(ious, 1, sorted_index[:, :n]).transpose(1, 0)
        ious_list.append(ious)
    ious = F.concat(ious_list, axis=0)
    # adaptive per-gt threshold: mean + std of candidate IoUs, floored at 0.35
    mean_var = F.mean(ious, axis=0)
    std_var = F.std(ious, 0)
    iou_thresh_per_gt = F.maximum(mean_var + std_var, 0.35)
    # limits the anchor centers in the gtboxes
    N, K = all_anchors.shape[0], gt_boxes.shape[0]
    anchor_points = an_centers
    proxies = F.broadcast_to(F.expand_dims(gt_boxes, axis=0), (N, K, gt_boxes.shape[-1]))
    l = anchor_points[:, :, 0] - proxies[:, :, 0]
    r = proxies[:, :, 2] - anchor_points[:, :, 0]
    t = anchor_points[:, :, 1] - proxies[:, :, 1]
    b = proxies[:, :, 3] - anchor_points[:, :, 1]
    is_in_gt = F.stack([l, r, t, b], axis=2)
    is_in_gt = is_in_gt.min(axis=2) > 0.1
    valid_mask = (
        overlaps >= F.expand_dims(iou_thresh_per_gt, axis=0)
    ) * is_in_gt.astype(np.float32)
    ious = overlaps * valid_mask
    # best gt per anchor after thresholding
    argmax_overlaps = F.argmax(ious, axis=1)
    max_overlaps = F.gather(ious, 1, F.expand_dims(argmax_overlaps, 1)).flatten()
    n = all_anchors.shape[0]
    labels = -F.ones(n)
    # > 0 -> positive (1); below the negative threshold -> 0; otherwise -1
    positive_mask = (max_overlaps > 0).astype(np.float32)
    negative_mask = (max_overlaps < config.rpn_negative_overlap).astype(np.float32)
    labels = positive_mask + labels * (1 - positive_mask) * (1 - negative_mask)
    bbox_targets = gt_boxes[argmax_overlaps, :4]
    bbox_targets = bbox_transform_opr(all_anchors[:, :4], bbox_targets)
    # class targets: zero out background matches, force -1 where ignored
    labels_cat = gt_boxes[argmax_overlaps, 4]
    labels_cat = labels_cat * (1 - F.equal(labels, 0).astype(np.float32))
    labels_cat = labels_cat * (1 - F.equal(labels, -1).astype(np.float32)) - F.equal(
        labels, -1
    ).astype(np.float32)
    return labels, bbox_targets, labels_cat
def rpn_anchor_target_opr(gt_boxes, im_info, anchors):
    """Build batched RPN labels and box targets.

    Runs :func:`_anchor_double_target` per image, masks out anchors that
    overlap ignore regions via :func:`mask_anchor_opr`, and stacks the
    per-image results along a new leading batch axis.

    Returns:
        rpn_labels: (B, A, 2) anchor labels (two gt slots per anchor).
        rpn_target_boxes: (B, A, 2, 4) regression targets.
    """
    label_batch, target_batch = [], []
    for bid in range(config.train_batch_per_gpu):
        labels, targets, _ = _anchor_double_target(gt_boxes[bid], im_info[bid], anchors)
        labels = labels.reshape(-1, 2)
        c = targets.shape[1]
        targets = targets.reshape(-1, 2, c)
        # anchors overlapping ignore regions: demote their 0-labels to -1
        ignore = mask_anchor_opr(gt_boxes[bid], im_info[bid], anchors, labels[:, 0])
        demote = F.equal(labels, 0).astype(np.float32) * F.expand_dims(
            ignore < 0, 1
        ).astype(np.float32)
        labels = labels - demote
        label_batch.append(F.expand_dims(labels, 0))
        target_batch.append(F.expand_dims(targets, 0))
    rpn_labels = F.concat(label_batch, axis=0)
    rpn_target_boxes = F.concat(target_batch, axis=0)
    return rpn_labels, rpn_target_boxes
def mask_anchor_opr(gtboxes, im_info, anchors, labels):
    """Demote labels of anchors that substantially overlap ignore regions.

    Computes intersection-over-anchor-area (IoA) between every anchor and
    every ignore gt box (class < 0); anchors covered more than 50% by any
    ignore box have their 0-labels turned into -1.

    Args:
        gtboxes: (G, 5+) boxes; column 4 < 0 marks an ignore region.
        im_info: per-image meta; im_info[5] holds the valid gt count.
        anchors: (N, 4+) anchor boxes.
        labels: (N,) anchor labels.

    Returns:
        labels with masked 0-entries replaced by -1.
    """
    eps = 1e-6
    gtboxes = gtboxes[:im_info[5].astype(np.int32), :]
    ignore_mask = (gtboxes[:, 4] < 0).astype(np.float32)
    # NOTE(review): dead store — overwritten by the boolean reduction below
    mask_flag = F.zeros(labels.shape[0])
    N, K = anchors.shape[0], gtboxes.shape[0]
    # broadcast anchors and gts to a common (N, K, *) grid
    p_pred = F.broadcast_to(F.expand_dims(anchors, 1), (N, K, anchors.shape[1]))
    p_gt = F.broadcast_to(F.expand_dims(gtboxes, 0), (N, K, gtboxes.shape[1]))
    # intersection box corners: max of top-lefts, min of bottom-rights
    max_off = F.concat([F.maximum(p_pred[:,:, :2], p_gt[:,:,:2]),
        F.minimum(p_pred[:, :, 2:4], p_gt[:, :, 2:4])],
        axis = 2)
    # I: intersection area; A: anchor area (both clamped to >= 0)
    I = F.maximum(max_off[:, :, 2] - max_off[:, :, 0] + 1, 0) * F.maximum(
        max_off[:, :, 3] - max_off[:, :, 1] + 1, 0)
    A = F.maximum(p_pred[:, :, 2] - p_pred[:, :, 0] + 1, 0) * F.maximum(
        p_pred[:, :, 3] - p_pred[:, :, 1] + 1, 0)
    # I = F.maximum(I, 0)
    # A = F.maximum(A, 0)
    IoA = I / (A + eps)
    # zero out columns of non-ignore gts so only ignore regions count
    IoA = IoA * F.expand_dims(ignore_mask, 0)
    mask_flag = (IoA > 0.5).sum(axis=1) > 0
    # 0-labels of flagged anchors become -1 (0 - 1); other labels unchanged
    labels = labels - F.equal(labels, 0).astype(np.float32) * mask_flag.astype(np.float32)
    return labels
def rpn_anchor_target_opr_impl(
    gt_boxes, im_info, anchors, clobber_positives = True, ignore_label=-1,
    background_label=0):
    """Classic max-IoU RPN anchor target assignment for a single image.

    NOTE(review): this function cannot run as written — it references the
    undefined name ``torch`` (``argmax``/``gather``/``nonzero``), calls the
    torch-only methods ``unsqueeze``/``eq`` on megengine tensors, and item-
    assigns into tensors (``fg_mask[gt_argmax_overlaps] = 1``). It appears
    to be an unfinished port (the original megbrain version is kept in the
    comment block below); left verbatim rather than guessed at.
    """
    gt_boxes, im_info = gt_boxes.detach(), im_info.detach()
    anchors = anchors.detach()
    # NOTE: For multi-gpu version, this function should be re-written
    a_shp0 = anchors.shape[0]
    # keep only the valid gts (im_info[5] is the valid count; class > 0)
    valid_gt_boxes = gt_boxes[:im_info[5], :]
    valid_mask = (gt_boxes[:im_info[5], 4] > 0).astype(np.float32)
    overlaps = box_overlap_opr(anchors[:, :4], valid_gt_boxes[:, :4])
    overlaps = overlaps * valid_mask.unsqueeze(0)
    # best gt per anchor, and best anchor per gt (both via undefined `torch`)
    argmax_overlaps = torch.argmax(overlaps,axis=1)
    max_overlaps = torch.gather(overlaps, 1, argmax_overlaps.unsqueeze(1))
    gt_argmax_overlaps = torch.argmax(overlaps, axis=0)
    gt_argmax_overlaps = torch.gather(overlaps, 1, gt_argmax_overlaps.unsqueeze(0))
    cond_max_overlaps = overlaps.eq(gt_argmax_overlaps).astype(np.float32)
    cmo_shape1 = cond_max_overlaps.shape[1]
    gt_argmax_overlaps = torch.nonzero(cond_max_overlaps.flatten(), as_tuple=False)
    gt_argmax_overlaps = gt_argmax_overlaps // cmo_shape1
    # start with everything ignored; positives are anchors above the overlap
    # threshold plus, for each gt, its single best-matching anchor
    labels = ignore_label * F.ones(a_shp0)
    fg_mask = (max_overlaps >= config.rpn_positive_overlap).astype(np.float32)
    fg_mask[gt_argmax_overlaps] = 1
    index = torch.nonzero(fg_mask, as_tuple=False).reshape(-1).long()
    labels[index] = 1
    bbox_targets = bbox_transform_opr(anchors, valid_gt_boxes[index, :4])
    # fg_mask[gt_argmax_overlaps]
    # --- megbrain fashion code ---
    # argmax_overlaps = O.Argmax(overlaps, axis=1)
    # max_overlaps = O.IndexingOneHot(overlaps, 1, argmax_overlaps)
    # gt_argmax_overlaps = O.Argmax(overlaps, axis=0)
    # gt_max_overlaps = O.IndexingOneHot(overlaps, 0, gt_argmax_overlaps)
    # cond_max_overlaps = overlaps.eq(gt_max_overlaps.add_axis(0))
    # cmo_shape1 = cond_max_overlaps.shape[1]
    # gt_argmax_overlaps = \
    #     O.CondTake(cond_max_overlaps.flatten(), cond_max_overlaps.flatten(),
    #         'EQ',1).outputs[1]
    # # why should be divided by the cmo_shape1
    # gt_argmax_overlaps = gt_argmax_overlaps // cmo_shape1
    # labels = O.ones(a_shp0) * ignore_label
    # const_one = O.ConstProvider(1.0)
    # if not clobber_positives:
    #     labels = labels * (max_overlaps >= config.rpn_negative_overlap)
    # fg_mask = (max_overlaps >= config.rpn_positive_overlap)
    # fg_mask = fg_mask.set_ai[gt_argmax_overlaps](
    #     const_one.broadcast(gt_argmax_overlaps.shape))
    # fg_mask_ind = O.CondTake(fg_mask, fg_mask, 'EQ', 1).outputs[1]
    # labels = labels.set_ai[fg_mask_ind](const_one.broadcast(fg_mask_ind.shape))
    # if clobber_positives:
    #     labels = labels * (max_overlaps >= config.rpn_negative_overlap)
    # Here, we compute the targets for each anchors
    # bbox_targets = bbox_transform_opr(
    #     anchors, valid_gt_boxes.ai[argmax_overlaps, :4])
    return labels, bbox_targets
|
[
"megengine.functional.zeros",
"megengine.functional.gather",
"megengine.functional.minimum",
"megengine.functional.mean",
"megengine.functional.abs",
"megengine.functional.stack",
"megengine.functional.std",
"megengine.functional.maximum",
"megengine.functional.argsort",
"megengine.functional.cond_take",
"megengine.functional.expand_dims",
"megengine.functional.equal",
"megengine.functional.concat",
"megengine.functional.ones",
"megengine.functional.pow"
] |
[((330, 359), 'megengine.functional.stack', 'F.stack', (['[ptrx, ptry]'], {'axis': '(1)'}), '([ptrx, ptry], axis=1)\n', (337, 359), True, 'from megengine import functional as F\n'), ((687, 716), 'megengine.functional.stack', 'F.stack', (['[l, t, r, b]'], {'axis': '(1)'}), '([l, t, r, b], axis=1)\n', (694, 716), True, 'from megengine import functional as F\n'), ((1040, 1075), 'megengine.functional.concat', 'F.concat', (['[gt_boxes, dummy]'], {'axis': '(0)'}), '([gt_boxes, dummy], axis=0)\n', (1048, 1075), True, 'from megengine import functional as F\n'), ((1445, 1482), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchor_centers'], {'axis': '(1)'}), '(anchor_centers, axis=1)\n', (1458, 1482), True, 'from megengine import functional as F\n'), ((1500, 1538), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes_centers'], {'axis': '(0)'}), '(gtboxes_centers, axis=0)\n', (1513, 1538), True, 'from megengine import functional as F\n'), ((1682, 1712), 'megengine.functional.abs', 'F.abs', (['(an_centers - gt_centers)'], {}), '(an_centers - gt_centers)\n', (1687, 1712), True, 'from megengine import functional as F\n'), ((1813, 1865), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['all_anchors[:, :4]', 'gt_boxes[:, :4]'], {}), '(all_anchors[:, :4], gt_boxes[:, :4])\n', (1828, 1865), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((1882, 1915), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(0)'}), '(valid_mask, axis=0)\n', (1895, 1915), True, 'from megengine import functional as F\n'), ((2401, 2428), 'megengine.functional.concat', 'F.concat', (['ious_list'], {'axis': '(0)'}), '(ious_list, axis=0)\n', (2409, 2428), True, 'from megengine import functional as F\n'), ((2444, 2464), 'megengine.functional.mean', 'F.mean', (['ious'], {'axis': '(0)'}), '(ious, axis=0)\n', (2450, 2464), True, 'from megengine import functional as F\n'), ((2481, 2495), 'megengine.functional.std', 'F.std', 
(['ious', '(0)'], {}), '(ious, 0)\n', (2486, 2495), True, 'from megengine import functional as F\n'), ((2564, 2597), 'megengine.functional.maximum', 'F.maximum', (['iou_thresh_per_gt', '(0.2)'], {}), '(iou_thresh_per_gt, 0.2)\n', (2573, 2597), True, 'from megengine import functional as F\n'), ((3143, 3172), 'megengine.functional.stack', 'F.stack', (['[l, r, t, b]'], {'axis': '(2)'}), '([l, r, t, b], axis=2)\n', (3150, 3172), True, 'from megengine import functional as F\n'), ((3372, 3390), 'megengine.functional.argsort', 'F.argsort', (['ious', '(1)'], {}), '(ious, 1)\n', (3381, 3390), True, 'from megengine import functional as F\n'), ((3413, 3444), 'megengine.functional.gather', 'F.gather', (['ious', '(1)', 'sorted_index'], {}), '(ious, 1, sorted_index)\n', (3421, 3444), True, 'from megengine import functional as F\n'), ((4037, 4089), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['all_anchors[:, :4]', 'bbox_targets'], {}), '(all_anchors[:, :4], bbox_targets)\n', (4055, 4089), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((4825, 4862), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchor_centers'], {'axis': '(1)'}), '(anchor_centers, axis=1)\n', (4838, 4862), True, 'from megengine import functional as F\n'), ((4880, 4918), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes_centers'], {'axis': '(0)'}), '(gtboxes_centers, axis=0)\n', (4893, 4918), True, 'from megengine import functional as F\n'), ((4999, 5029), 'megengine.functional.abs', 'F.abs', (['(an_centers - gt_centers)'], {}), '(an_centers - gt_centers)\n', (5004, 5029), True, 'from megengine import functional as F\n'), ((5130, 5182), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['all_anchors[:, :4]', 'gt_boxes[:, :4]'], {}), '(all_anchors[:, :4], gt_boxes[:, :4])\n', (5145, 5182), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((5723, 5750), 'megengine.functional.concat', 'F.concat', 
(['ious_list'], {'axis': '(0)'}), '(ious_list, axis=0)\n', (5731, 5750), True, 'from megengine import functional as F\n'), ((6343, 6372), 'megengine.functional.stack', 'F.stack', (['[l, r, t, b]'], {'axis': '(2)'}), '([l, r, t, b], axis=2)\n', (6350, 6372), True, 'from megengine import functional as F\n'), ((6947, 6999), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['all_anchors[:, :4]', 'bbox_targets'], {}), '(all_anchors[:, :4], bbox_targets)\n', (6965, 6999), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((8267, 8299), 'megengine.functional.concat', 'F.concat', (['rpn_label_list'], {'axis': '(0)'}), '(rpn_label_list, axis=0)\n', (8275, 8299), True, 'from megengine import functional as F\n'), ((8325, 8364), 'megengine.functional.concat', 'F.concat', (['rpn_target_boxes_list'], {'axis': '(0)'}), '(rpn_target_boxes_list, axis=0)\n', (8333, 8364), True, 'from megengine import functional as F\n'), ((8613, 8637), 'megengine.functional.zeros', 'F.zeros', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (8620, 8637), True, 'from megengine import functional as F\n'), ((10025, 10079), 'det_opr.bbox_opr.box_overlap_opr', 'box_overlap_opr', (['anchors[:, :4]', 'valid_gt_boxes[:, :4]'], {}), '(anchors[:, :4], valid_gt_boxes[:, :4])\n', (10040, 10079), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((10932, 10986), 'det_opr.bbox_opr.bbox_transform_opr', 'bbox_transform_opr', (['anchors', 'valid_gt_boxes[index, :4]'], {}), '(anchors, valid_gt_boxes[index, :4])\n', (10950, 10986), False, 'from det_opr.bbox_opr import box_overlap_opr, bbox_transform_opr\n'), ((1342, 1375), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(1)'}), '(valid_mask, axis=1)\n', (1355, 1375), True, 'from megengine import functional as F\n'), ((2010, 2064), 'megengine.functional.cond_take', 'F.cond_take', (['(all_anchors[:, 4] == l)', 'all_anchors[:, 4]'], {}), '(all_anchors[:, 4] == l, 
all_anchors[:, 4])\n', (2021, 2064), True, 'from megengine import functional as F\n'), ((2195, 2234), 'megengine.functional.argsort', 'F.argsort', (['level_dist'], {'descending': '(False)'}), '(level_dist, descending=False)\n', (2204, 2234), True, 'from megengine import functional as F\n'), ((2861, 2892), 'megengine.functional.expand_dims', 'F.expand_dims', (['pos_area'], {'axis': '(0)'}), '(pos_area, axis=0)\n', (2874, 2892), True, 'from megengine import functional as F\n'), ((4659, 4692), 'megengine.functional.expand_dims', 'F.expand_dims', (['valid_mask'], {'axis': '(0)'}), '(valid_mask, axis=0)\n', (4672, 4692), True, 'from megengine import functional as F\n'), ((6688, 6697), 'megengine.functional.ones', 'F.ones', (['n'], {}), '(n)\n', (6694, 6697), True, 'from megengine import functional as F\n'), ((8712, 8737), 'megengine.functional.expand_dims', 'F.expand_dims', (['anchors', '(1)'], {}), '(anchors, 1)\n', (8725, 8737), True, 'from megengine import functional as F\n'), ((8791, 8816), 'megengine.functional.expand_dims', 'F.expand_dims', (['gtboxes', '(0)'], {}), '(gtboxes, 0)\n', (8804, 8816), True, 'from megengine import functional as F\n'), ((9035, 9088), 'megengine.functional.maximum', 'F.maximum', (['(max_off[:, :, 2] - max_off[:, :, 0] + 1)', '(0)'], {}), '(max_off[:, :, 2] - max_off[:, :, 0] + 1, 0)\n', (9044, 9088), True, 'from megengine import functional as F\n'), ((9091, 9144), 'megengine.functional.maximum', 'F.maximum', (['(max_off[:, :, 3] - max_off[:, :, 1] + 1)', '(0)'], {}), '(max_off[:, :, 3] - max_off[:, :, 1] + 1, 0)\n', (9100, 9144), True, 'from megengine import functional as F\n'), ((9162, 9213), 'megengine.functional.maximum', 'F.maximum', (['(p_pred[:, :, 2] - p_pred[:, :, 0] + 1)', '(0)'], {}), '(p_pred[:, :, 2] - p_pred[:, :, 0] + 1, 0)\n', (9171, 9213), True, 'from megengine import functional as F\n'), ((9216, 9267), 'megengine.functional.maximum', 'F.maximum', (['(p_pred[:, :, 3] - p_pred[:, :, 1] + 1)', '(0)'], {}), '(p_pred[:, :, 3] 
- p_pred[:, :, 1] + 1, 0)\n', (9225, 9267), True, 'from megengine import functional as F\n'), ((9374, 9403), 'megengine.functional.expand_dims', 'F.expand_dims', (['ignore_mask', '(0)'], {}), '(ignore_mask, 0)\n', (9387, 9403), True, 'from megengine import functional as F\n'), ((10690, 10704), 'megengine.functional.ones', 'F.ones', (['a_shp0'], {}), '(a_shp0)\n', (10696, 10704), True, 'from megengine import functional as F\n'), ((3247, 3287), 'megengine.functional.expand_dims', 'F.expand_dims', (['iou_thresh_per_gt'], {'axis': '(0)'}), '(iou_thresh_per_gt, axis=0)\n', (3260, 3287), True, 'from megengine import functional as F\n'), ((8146, 8174), 'megengine.functional.expand_dims', 'F.expand_dims', (['rpn_labels', '(0)'], {}), '(rpn_labels, 0)\n', (8159, 8174), True, 'from megengine import functional as F\n'), ((8213, 8247), 'megengine.functional.expand_dims', 'F.expand_dims', (['rpn_target_boxes', '(0)'], {}), '(rpn_target_boxes, 0)\n', (8226, 8247), True, 'from megengine import functional as F\n'), ((8873, 8916), 'megengine.functional.maximum', 'F.maximum', (['p_pred[:, :, :2]', 'p_gt[:, :, :2]'], {}), '(p_pred[:, :, :2], p_gt[:, :, :2])\n', (8882, 8916), True, 'from megengine import functional as F\n'), ((8940, 8985), 'megengine.functional.minimum', 'F.minimum', (['p_pred[:, :, 2:4]', 'p_gt[:, :, 2:4]'], {}), '(p_pred[:, :, 2:4], p_gt[:, :, 2:4])\n', (8949, 8985), True, 'from megengine import functional as F\n'), ((974, 1004), 'megengine.functional.ones', 'F.ones', (['[1, gt_boxes.shape[1]]'], {}), '([1, gt_boxes.shape[1]])\n', (980, 1004), True, 'from megengine import functional as F\n'), ((1735, 1753), 'megengine.functional.pow', 'F.pow', (['distance', '(2)'], {}), '(distance, 2)\n', (1740, 1753), True, 'from megengine import functional as F\n'), ((2302, 2340), 'megengine.functional.gather', 'F.gather', (['ious', '(1)', 'sorted_index[:, :n]'], {}), '(ious, 1, sorted_index[:, :n])\n', (2310, 2340), True, 'from megengine import functional as F\n'), ((3625, 3638), 
'megengine.functional.ones', 'F.ones', (['(2 * n)'], {}), '(2 * n)\n', (3631, 3638), True, 'from megengine import functional as F\n'), ((3956, 3990), 'megengine.functional.expand_dims', 'F.expand_dims', (['all_anchors'], {'axis': '(1)'}), '(all_anchors, axis=1)\n', (3969, 3990), True, 'from megengine import functional as F\n'), ((4214, 4233), 'megengine.functional.equal', 'F.equal', (['labels', '(-1)'], {}), '(labels, -1)\n', (4221, 4233), True, 'from megengine import functional as F\n'), ((5052, 5070), 'megengine.functional.pow', 'F.pow', (['distance', '(2)'], {}), '(distance, 2)\n', (5057, 5070), True, 'from megengine import functional as F\n'), ((9471, 9489), 'megengine.functional.equal', 'F.equal', (['labels', '(0)'], {}), '(labels, 0)\n', (9478, 9489), True, 'from megengine import functional as F\n'), ((4172, 4191), 'megengine.functional.equal', 'F.equal', (['labels', '(-1)'], {}), '(labels, -1)\n', (4179, 4191), True, 'from megengine import functional as F\n'), ((7884, 7906), 'megengine.functional.equal', 'F.equal', (['rpn_labels', '(0)'], {}), '(rpn_labels, 0)\n', (7891, 7906), True, 'from megengine import functional as F\n'), ((7928, 7962), 'megengine.functional.expand_dims', 'F.expand_dims', (['(ignore_label < 0)', '(1)'], {}), '(ignore_label < 0, 1)\n', (7941, 7962), True, 'from megengine import functional as F\n')]
|
import os
import time
from megengine.distributed.group import is_distributed
import megengine.distributed as dist
from megengine.data.dataloader import DataLoader
from edit.core.hook import Hook
from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist
class EvalIterHook(Hook):
    """Iteration-based evaluation hook.

    Regularly runs evaluation on a dataloader every ``interval`` training
    iterations. In distributed training only rank 0 performs the evaluation;
    all ranks synchronize on a barrier afterwards.

    Args:
        dataloader (DataLoader): A mge dataloader.
        eval_kwargs (dict): Other eval kwargs. It contains:
            interval (int): Evaluation interval. Default: 10000.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): The path to save images.
            log_path (str): The path of the evaluation log file.
            multi_process (bool): Evaluate on all ranks (not implemented).
            ensemble (bool): Forwarded to the model's ``test_step``.
    """
    def __init__(self, dataloader, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a mge DataLoader, but got {}'.format(type(dataloader)))
        self.dataloader = dataloader
        self.eval_kwargs = eval_kwargs
        # pop hook-level options; whatever remains is forwarded as eval kwargs
        self.interval = self.eval_kwargs.pop('interval', 10000)
        self.save_image = self.eval_kwargs.pop('save_image', False)
        self.save_path = self.eval_kwargs.pop('save_path', None)
        self.log_path = self.eval_kwargs.pop('log_path', None)
        self.multi_process = self.eval_kwargs.pop('multi_process', False)
        self.ensemble = self.eval_kwargs.pop('ensemble', False)
        mkdir_or_exist(self.save_path)
        self.logger = get_logger(name = "EvalIterHook", log_file=self.log_path)  # only for rank0
        if is_distributed():
            self.local_rank = dist.get_rank()
            self.nranks = dist.get_world_size()
        else:
            self.local_rank = 0
            self.nranks = 1

    def after_train_iter(self, runner):
        """Run evaluation every ``self.interval`` iterations.

        Args:
            runner (``BaseRunner``): The runner driving training.
        """
        if not self.every_n_iters(runner, self.interval):
            return
        self.logger.info("start to eval for iter: {}".format(runner.iter+1))
        save_path = os.path.join(self.save_path, "iter_{}".format(runner.iter+1))
        mkdir_or_exist(save_path)
        results = []  # list of dict
        if self.multi_process:
            assert is_distributed(), "when set multiprocess eval, you should use multi process training"
            raise NotImplementedError("not support multi process for eval now")
        elif self.local_rank == 0:  # rank 0 handles the whole evaluation
            for data in self.dataloader:
                outputs = runner.model.test_step(data, save_image=self.save_image, save_path=save_path, ensemble=self.ensemble)
                result = runner.model.cal_for_eval(outputs, data)
                assert isinstance(result, list)
                results += result
            self.evaluate(results, runner.iter+1)
        else:
            pass
        # keep all ranks in lockstep while rank 0 evaluates
        if is_distributed():
            dist.group_barrier()

    def evaluate(self, results, iters):
        """Evaluation function.

        Args:
            results (list of dict): Model forward results.
            iters (int): Current iteration number (used in the save path).
        """
        save_path = os.path.join(self.save_path, "iter_{}".format(iters))  # save for some information. e.g. SVG for everyframe value in VSR.
        eval_res = self.dataloader.dataset.evaluate(results, save_path)
        self.logger.info("***** eval results for {} iters: *****".format(iters))
        for name, val in eval_res.items():
            self.logger.info("metric: {} average_val: {:.4f}".format(name, val))
|
[
"megengine.distributed.group.is_distributed",
"megengine.distributed.get_rank",
"megengine.distributed.group_barrier",
"megengine.distributed.get_world_size"
] |
[((1392, 1422), 'edit.utils.mkdir_or_exist', 'mkdir_or_exist', (['self.save_path'], {}), '(self.save_path)\n', (1406, 1422), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((1445, 1500), 'edit.utils.get_logger', 'get_logger', ([], {'name': '"""EvalIterHook"""', 'log_file': 'self.log_path'}), "(name='EvalIterHook', log_file=self.log_path)\n", (1455, 1500), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((1540, 1556), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (1554, 1556), False, 'from megengine.distributed.group import is_distributed\n'), ((2012, 2037), 'edit.utils.mkdir_or_exist', 'mkdir_or_exist', (['save_path'], {}), '(save_path)\n', (2026, 2037), False, 'from edit.utils import to_list, is_list_of, get_logger, mkdir_or_exist\n'), ((2752, 2768), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (2766, 2768), False, 'from megengine.distributed.group import is_distributed\n'), ((1588, 1603), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1601, 1603), True, 'import megengine.distributed as dist\n'), ((1630, 1651), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1649, 1651), True, 'import megengine.distributed as dist\n'), ((2125, 2141), 'megengine.distributed.group.is_distributed', 'is_distributed', ([], {}), '()\n', (2139, 2141), False, 'from megengine.distributed.group import is_distributed\n'), ((2782, 2802), 'megengine.distributed.group_barrier', 'dist.group_barrier', ([], {}), '()\n', (2800, 2802), True, 'import megengine.distributed as dist\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from functools import partial
import numpy as np
import pytest
from utils import opr_test
import megengine.functional as F
from megengine import jit, tensor
def common_test_reduce(opr, ref_opr):
    """Compare a megengine reduction ``opr`` against its numpy reference.

    Runs over two random float32 inputs plus one containing NaN, testing the
    default axis, every positive and negative axis, and (for non-arg
    reductions) keepdims=True. argmin/argmax indices are cast to int32 to
    match megengine's output dtype.
    """
    cases = [
        {"input": np.random.random((5, 6, 7)).astype(np.float32)},
        {"input": np.random.random((2, 9, 12)).astype(np.float32)},
        {"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
    ]
    ndim = 3
    if opr in (F.argmin, F.argmax):
        # index-returning reductions: numpy yields int64, megengine int32
        opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
        for ax in range(ndim):
            # positive axis
            opr_test(
                cases,
                opr,
                ref_fn=lambda x, a=ax: ref_opr(x, axis=a).astype(np.int32),
                axis=ax,
            )
            # equivalent negative axis
            opr_test(
                cases,
                opr,
                ref_fn=lambda x, a=ax - ndim: ref_opr(x, axis=a).astype(np.int32),
                axis=ax - ndim,
            )
    else:
        # default axis (reduce over everything)
        opr_test(cases, opr, ref_fn=ref_opr)
        for ax in range(-ndim, ndim):
            # keepdims False, then keepdims True
            opr_test(cases, opr, ref_fn=lambda x, a=ax: ref_opr(x, axis=a), axis=ax)
            opr_test(
                cases,
                opr,
                ref_fn=lambda x, a=ax: ref_opr(x, axis=a, keepdims=True),
                axis=ax,
                keepdims=True,
            )
# One-liner entry points: each checks a single reduction operator against its
# numpy reference via common_test_reduce (default axis, every axis, keepdims
# variants; argmin/argmax additionally cast the index result to int32).
def test_sum():
    common_test_reduce(opr=F.sum, ref_opr=np.sum)
def test_prod():
    common_test_reduce(opr=F.prod, ref_opr=np.prod)
def test_mean():
    common_test_reduce(opr=F.mean, ref_opr=np.mean)
def test_var():
    common_test_reduce(opr=F.var, ref_opr=np.var)
def test_std():
    common_test_reduce(opr=F.std, ref_opr=np.std)
def test_min():
    common_test_reduce(opr=F.min, ref_opr=np.min)
def test_max():
    common_test_reduce(opr=F.max, ref_opr=np.max)
def test_argmin():
    common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
def test_argmax():
    common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
def test_sqrt():
    """Elementwise sqrt matches numpy on random 1-d float32 inputs."""
    inputs = [np.random.random(n).astype(np.float32) for n in (15, 25)]
    opr_test([{"input": x} for x in inputs], F.sqrt, ref_fn=np.sqrt)
def test_sort():
    """F.sort returns (sorted values, int32 argsort indices) like numpy."""
    cases = []
    for shape in [(10, 3), (12, 2)]:
        data = np.random.random(shape).astype(np.float32)
        expected = [np.sort(data), np.argsort(data).astype(np.int32)]
        cases.append({"input": data, "output": expected})
    opr_test(cases, F.sort)
@pytest.mark.parametrize("is_symbolic", [None, False, True])
def test_sort_empty(is_symbolic):
data_shapes = [
(0,),
(10, 0),
]
def fn(x):
return F.sort(x)
for shape in data_shapes:
if is_symbolic is not None:
fn_ = jit.trace(symbolic=is_symbolic)(fn)
else:
fn_ = fn
data = np.random.random(shape).astype(np.float32)
for _ in range(3):
outs = fn_(tensor(data))
ref_outs = (np.sort(data), np.argsort(data))
assert len(ref_outs) == len(outs)
for i in range(len(outs)):
np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
if is_symbolic is None:
break
def test_normalize():
    """F.normalize matches a numpy Lp-normalization along a given axis,
    including slices whose norm is exactly zero (guarded by eps)."""
    cases = [
        {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for _ in range(2)
    ]

    def np_normalize(x, p=2, axis=None, eps=1e-12):
        if axis is None:
            norm = np.sum(x ** p) ** (1.0 / p)
        else:
            norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
        return x / np.clip(norm, a_min=eps, a_max=np.inf)

    # L2 norm along the channel axis
    opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
    # force some rows to zero norm: must not divide by zero
    cases[0]["input"][0, 0, 0, :] = 0
    cases[1]["input"][0, 0, 0, :] = 0
    opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
def test_sum_neg_axis():
    """Negative axes are accepted; duplicate axes (-1 with 1) must raise."""
    data = np.random.random((2, 3)).astype(np.float32)
    for axis in (-1, -2, (-2, 1), (-1, 0)):
        expected = np.sum(data, axis=axis)
        actual = F.sum(tensor(data), axis=axis)
        np.testing.assert_allclose(actual.numpy(), expected, rtol=1e-6)
    with pytest.raises(AssertionError):
        F.sum(tensor(data), axis=(-1, 1))
def test_non_finite():
    """F.math._check_non_finite flags tensor lists containing inf or nan."""
    shape = (32, 3, 32, 32)
    a = np.random.random(shape).astype(np.float32)
    b = np.random.random(shape).astype(np.float32)

    def check():
        return F.math._check_non_finite([tensor(a), tensor(b)]).numpy()

    np.testing.assert_equal(check(), [0])  # all finite
    b[0][0][0][0] = float("inf")
    np.testing.assert_equal(check(), [1])  # inf detected
    b[0][0][0][0] = float("nan")
    np.testing.assert_equal(check(), [1])  # nan detected
@pytest.mark.parametrize("descending", [True, False])
@pytest.mark.parametrize("sorted", [True, False])
@pytest.mark.parametrize("inp1d", [True, False])
@pytest.mark.parametrize("kth_only", [True, False])
def test_topk(descending, sorted, inp1d, kth_only):
    """Cross-check ``F.topk`` against numpy sorting for 1-d and 2-d int input."""
    k = 3
    if inp1d:
        data = np.random.permutation(7)
    else:
        data = np.random.permutation(5 * 7).reshape(5, 7)
    data = data.astype(np.int32)

    def np_sort(x):
        ordered = np.sort(x)
        return ordered[..., ::-1] if descending else ordered

    values, indices = F.topk(
        tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
    )
    values = values.numpy()
    indices = indices.numpy()
    if kth_only:
        # kth_only returns only the k-th element; indices must address it
        np.testing.assert_equal(
            values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
        )
        np.testing.assert_equal(values, np_sort(data)[..., k - 1])
    else:
        # values/indices must agree with each other ...
        np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
        if not sorted:
            values = np_sort(values)
        # ... and with the numpy reference top-k
        np.testing.assert_equal(values, np_sort(data)[..., :k])
@pytest.mark.parametrize("is_trace", [True, False])
def test_reduce_on_empty_tensor(is_trace):
    """Reductions over empty tensors must match numpy, both eager and traced.

    Note: ``np.bool`` (an alias for the builtin) was deprecated in NumPy 1.20
    and removed in 1.24 — use plain ``bool`` instead.
    """
    dtypes = [np.float32, np.int32, bool]
    inputs = [
        (np.random.random((0,)), None),
        (np.random.random((3, 0, 2)), 1),
        (np.random.random((10, 10, 0, 10)), 0),
    ]

    def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
        # trace-compile the reduction when requested
        if is_trace:
            fn = jit.trace(symbolic=symbolic)(fn)
        # run a few times so the traced graph is exercised after warm-up
        for i in range(3):
            out = fn(tensor(input, dtype=dtype), axis=axis).numpy()
            out_ref = ref_fn(input.astype(dtype), axis=axis)
            np.testing.assert_equal(out, out_ref)

    for dtype in dtypes:
        for inp, axis in inputs:
            run_test(F.sum, np.sum, inp, dtype, axis, True)
            run_test(F.sum, np.sum, inp, dtype, axis, False)
            run_test(F.prod, np.prod, inp, dtype, axis, True)
            run_test(F.prod, np.prod, inp, dtype, axis, False)
|
[
"megengine.jit.trace",
"megengine.tensor",
"megengine.functional.sort"
] |
[((3460, 3519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_symbolic"""', '[None, False, True]'], {}), "('is_symbolic', [None, False, True])\n", (3483, 3519), False, 'import pytest\n'), ((6105, 6157), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""descending"""', '[True, False]'], {}), "('descending', [True, False])\n", (6128, 6157), False, 'import pytest\n'), ((6159, 6207), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sorted"""', '[True, False]'], {}), "('sorted', [True, False])\n", (6182, 6207), False, 'import pytest\n'), ((6209, 6256), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inp1d"""', '[True, False]'], {}), "('inp1d', [True, False])\n", (6232, 6256), False, 'import pytest\n'), ((6258, 6308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kth_only"""', '[True, False]'], {}), "('kth_only', [True, False])\n", (6281, 6308), False, 'import pytest\n'), ((7259, 7309), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_trace"""', '[True, False]'], {}), "('is_trace', [True, False])\n", (7282, 7309), False, 'import pytest\n'), ((2951, 2990), 'utils.opr_test', 'opr_test', (['cases', 'F.sqrt'], {'ref_fn': 'np.sqrt'}), '(cases, F.sqrt, ref_fn=np.sqrt)\n', (2959, 2990), False, 'from utils import opr_test\n'), ((3433, 3456), 'utils.opr_test', 'opr_test', (['cases', 'F.sort'], {}), '(cases, F.sort)\n', (3441, 3456), False, 'from utils import opr_test\n'), ((984, 1020), 'utils.opr_test', 'opr_test', (['cases', 'opr'], {'ref_fn': 'ref_opr'}), '(cases, opr, ref_fn=ref_opr)\n', (992, 1020), False, 'from utils import opr_test\n'), ((3199, 3213), 'numpy.sort', 'np.sort', (['data1'], {}), '(data1)\n', (3206, 3213), True, 'import numpy as np\n'), ((3266, 3280), 'numpy.sort', 'np.sort', (['data2'], {}), '(data2)\n', (3273, 3280), True, 'import numpy as np\n'), ((3642, 3651), 'megengine.functional.sort', 'F.sort', (['x'], {}), '(x)\n', (3648, 3651), True, 'import megengine.functional as F\n'), 
((4892, 4920), 'functools.partial', 'partial', (['F.normalize'], {'axis': '(1)'}), '(F.normalize, axis=1)\n', (4899, 4920), False, 'from functools import partial\n'), ((5083, 5111), 'functools.partial', 'partial', (['F.normalize'], {'axis': '(3)'}), '(F.normalize, axis=3)\n', (5090, 5111), False, 'from functools import partial\n'), ((5354, 5377), 'numpy.sum', 'np.sum', (['data'], {'axis': 'axis'}), '(data, axis=axis)\n', (5360, 5377), True, 'import numpy as np\n'), ((5451, 5480), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5464, 5480), False, 'import pytest\n'), ((6400, 6424), 'numpy.random.permutation', 'np.random.permutation', (['(7)'], {}), '(7)\n', (6421, 6424), True, 'import numpy as np\n'), ((6626, 6636), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (6633, 6636), True, 'import numpy as np\n'), ((6664, 6676), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (6670, 6676), False, 'from megengine import jit, tensor\n'), ((645, 674), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (661, 674), True, 'import numpy as np\n'), ((706, 735), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (722, 735), True, 'import numpy as np\n'), ((839, 898), 'numpy.array', 'np.array', (['[[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]]'], {}), '([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])\n', (847, 898), True, 'import numpy as np\n'), ((2802, 2828), 'numpy.random.random', 'np.random.random', (['d1_shape'], {}), '(d1_shape)\n', (2818, 2828), True, 'import numpy as np\n'), ((2857, 2883), 'numpy.random.random', 'np.random.random', (['d2_shape'], {}), '(d2_shape)\n', (2873, 2883), True, 'import numpy as np\n'), ((3074, 3103), 'numpy.random.random', 'np.random.random', (['data1_shape'], {}), '(data1_shape)\n', (3090, 3103), True, 'import numpy as np\n'), ((3135, 3164), 'numpy.random.random', 'np.random.random', (['data2_shape'], {}), '(data2_shape)\n', (3151, 
3164), True, 'import numpy as np\n'), ((4557, 4595), 'numpy.clip', 'np.clip', (['norm'], {'a_min': 'eps', 'a_max': 'np.inf'}), '(norm, a_min=eps, a_max=np.inf)\n', (4564, 4595), True, 'import numpy as np\n'), ((4929, 4958), 'functools.partial', 'partial', (['np_normalize'], {'axis': '(1)'}), '(np_normalize, axis=1)\n', (4936, 4958), False, 'from functools import partial\n'), ((5120, 5149), 'functools.partial', 'partial', (['np_normalize'], {'axis': '(3)'}), '(np_normalize, axis=3)\n', (5127, 5149), False, 'from functools import partial\n'), ((5208, 5231), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5224, 5231), True, 'import numpy as np\n'), ((5315, 5327), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (5321, 5327), False, 'from megengine import jit, tensor\n'), ((5496, 5508), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (5502, 5508), False, 'from megengine import jit, tensor\n'), ((5589, 5612), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5605, 5612), True, 'import numpy as np\n'), ((5644, 5667), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (5660, 5667), True, 'import numpy as np\n'), ((5723, 5736), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (5729, 5736), False, 'from megengine import jit, tensor\n'), ((5738, 5751), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (5744, 5751), False, 'from megengine import jit, tensor\n'), ((5874, 5887), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (5880, 5887), False, 'from megengine import jit, tensor\n'), ((5889, 5902), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (5895, 5902), False, 'from megengine import jit, tensor\n'), ((6025, 6038), 'megengine.tensor', 'tensor', (['data1'], {}), '(data1)\n', (6031, 6038), False, 'from megengine import jit, tensor\n'), ((6040, 6053), 'megengine.tensor', 'tensor', (['data2'], {}), '(data2)\n', (6046, 6053), False, 'from 
megengine import jit, tensor\n'), ((7093, 7130), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'indices', '(-1)'], {}), '(data, indices, -1)\n', (7111, 7130), True, 'import numpy as np\n'), ((7422, 7444), 'numpy.random.random', 'np.random.random', (['(0,)'], {}), '((0,))\n', (7438, 7444), True, 'import numpy as np\n'), ((7462, 7489), 'numpy.random.random', 'np.random.random', (['(3, 0, 2)'], {}), '((3, 0, 2))\n', (7478, 7489), True, 'import numpy as np\n'), ((7504, 7537), 'numpy.random.random', 'np.random.random', (['(10, 10, 0, 10)'], {}), '((10, 10, 0, 10))\n', (7520, 7537), True, 'import numpy as np\n'), ((7860, 7897), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out', 'out_ref'], {}), '(out, out_ref)\n', (7883, 7897), True, 'import numpy as np\n'), ((3215, 3232), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (3225, 3232), True, 'import numpy as np\n'), ((3282, 3299), 'numpy.argsort', 'np.argsort', (['data2'], {}), '(data2)\n', (3292, 3299), True, 'import numpy as np\n'), ((3737, 3768), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'is_symbolic'}), '(symbolic=is_symbolic)\n', (3746, 3768), False, 'from megengine import jit, tensor\n'), ((3823, 3846), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3839, 3846), True, 'import numpy as np\n'), ((3916, 3928), 'megengine.tensor', 'tensor', (['data'], {}), '(data)\n', (3922, 3928), False, 'from megengine import jit, tensor\n'), ((3954, 3967), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (3961, 3967), True, 'import numpy as np\n'), ((3969, 3985), 'numpy.argsort', 'np.argsort', (['data'], {}), '(data)\n', (3979, 3985), True, 'import numpy as np\n'), ((4423, 4437), 'numpy.sum', 'np.sum', (['(x ** p)'], {}), '(x ** p)\n', (4429, 4437), True, 'import numpy as np\n'), ((4484, 4524), 'numpy.sum', 'np.sum', (['(x ** p)'], {'axis': 'axis', 'keepdims': '(True)'}), '(x ** p, axis=axis, keepdims=True)\n', (4490, 4524), True, 'import numpy as 
np\n'), ((6450, 6478), 'numpy.random.permutation', 'np.random.permutation', (['(5 * 7)'], {}), '(5 * 7)\n', (6471, 6478), True, 'import numpy as np\n'), ((6589, 6599), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (6596, 6599), True, 'import numpy as np\n'), ((7659, 7687), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': 'symbolic'}), '(symbolic=symbolic)\n', (7668, 7687), False, 'from megengine import jit, tensor\n'), ((4249, 4281), 'numpy.random.random', 'np.random.random', (['(2, 3, 12, 12)'], {}), '((2, 3, 12, 12))\n', (4265, 4281), True, 'import numpy as np\n'), ((6905, 6953), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'indices[..., None]', '(-1)'], {}), '(data, indices[..., None], -1)\n', (6923, 6953), True, 'import numpy as np\n'), ((7740, 7766), 'megengine.tensor', 'tensor', (['input'], {'dtype': 'dtype'}), '(input, dtype=dtype)\n', (7746, 7766), False, 'from megengine import jit, tensor\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""Test int8 quantizated model on ImageNet.
Note:
* QAT simulate int8 with fp32, gpu only.
* Quantized use real int8, cpu only, a bit slow.
* Results may be slightly different between qat and quantized mode.
"""
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.quantization as Q
import models
logger = mge.get_logger(__name__)
def main():
    """Parse CLI arguments and launch one evaluation worker per device."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="resnet18", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="/data/models", type=str)
    parser.add_argument("-c", "--checkpoint", default=None, type=str,
                        help="pretrained model to finetune")

    parser.add_argument("-m", "--mode", default="qat", type=str,
                        choices=["normal", "qat", "quantized"],
                        help="Quantization Mode\n"
                        "normal: no quantization, using float32\n"
                        "qat: quantization aware training, simulate int8\n"
                        "quantized: convert mode to int8 quantized, inference only")

    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    if args.ngpus is None:
        world_size = mge.get_device_count("gpu")
    else:
        world_size = args.ngpus

    if args.mode == "quantized":
        # real int8 inference runs on CPU only, in a single process
        world_size = 1
        args.report_freq = 1  # test is slow on cpu
        mge.set_default_device("cpux")
        logger.warning("quantized mode use cpu only")

    if world_size > 1:
        # start distributed training, dispatch sub-processes
        mp.set_start_method("spawn")
        procs = []
        for rank in range(world_size):
            proc = mp.Process(target=worker, args=(rank, world_size, args))
            proc.start()
            procs.append(proc)
        for proc in procs:
            proc.join()
    else:
        worker(0, 1, args)
def worker(rank, world_size, args):
    # pylint: disable=too-many-statements
    """Run validation in one (possibly distributed) worker process.

    Builds the requested model, optionally applies QAT fake-quantization or a
    real int8 conversion, loads checkpoint weights, traces the validation
    graph, and evaluates it over the ImageNet validation set.

    Args:
        rank: rank of this process within the distributed group.
        world_size: total number of worker processes.
        args: parsed command-line arguments from ``main``.
    """
    if world_size > 1:
        # Initialize distributed process group
        logger.info("init distributed process group {} / {}".format(rank, world_size))
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )

    # instantiate the architecture selected by name
    model = models.__dict__[args.arch]()

    if args.mode != "normal":
        # insert fake-quant ops (EMA observers) for qat/quantized modes
        Q.quantize_qat(model, Q.ema_fakequant_qconfig)

    if args.checkpoint:
        logger.info("Load pretrained weights from %s", args.checkpoint)
        ckpt = mge.load(args.checkpoint)
        # checkpoints may either be a raw state dict or wrap one
        ckpt = ckpt["state_dict"] if "state_dict" in ckpt else ckpt
        model.load_state_dict(ckpt, strict=False)

    if args.mode == "quantized":
        # convert the fake-quant model to a real int8 one (inference only)
        Q.quantize(model)

    # Define valid graph
    @jit.trace(symbolic=True)
    def valid_func(image, label):
        model.eval()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
        return loss, acc1, acc5

    # Build valid datasets
    logger.info("preparing dataset..")
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=100, drop_last=False
    )
    valid_queue = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        transform=T.Compose(
            [
                T.Resize(256),
                T.CenterCrop(224),
                T.Normalize(mean=128),
                T.ToMode("CHW"),
            ]
        ),
        num_workers=args.workers,
    )

    _, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
    logger.info("TEST %f, %f", valid_acc, valid_acc5)
def infer(model, data_queue, args):
    """Run the (traced) evaluation function over a data queue.

    Returns the running averages of loss, top-1 accuracy and top-5 accuracy.
    """
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    total_time = AverageMeter("Time")

    tick = time.time()
    for step, (image, label) in enumerate(data_queue):
        batch = image.shape[0]
        image = image.astype("float32")  # convert np.uint8 to float32
        label = label.astype("int32")

        loss, acc1, acc5 = model(image, label)

        objs.update(loss.numpy()[0], batch)
        top1.update(100 * acc1.numpy()[0], batch)
        top5.update(100 * acc5.numpy()[0], batch)
        total_time.update(time.time() - tick)
        tick = time.time()

        # only rank 0 reports, every args.report_freq steps
        if step % args.report_freq == 0 and dist.get_rank() == 0:
            logger.info("Step %d, %s %s %s %s",
                        step, objs, top1, top5, total_time)

    return objs.avg, top1.avg, top5.avg
class AverageMeter:
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 0.123 (0.456)" with the default format spec
        template = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return template.format(**self.__dict__)
# Script entry point: parse args and dispatch evaluation workers.
if __name__ == "__main__":
    main()
|
[
"megengine.jit.trace",
"megengine.distributed.init_process_group",
"megengine.distributed.is_distributed",
"megengine.functional.cross_entropy_with_softmax",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.data.transform.CenterCrop",
"megengine.get_device_count",
"megengine.load",
"megengine.data.transform.Resize",
"megengine.set_default_device",
"megengine.quantization.quantize_qat",
"megengine.get_logger",
"megengine.data.SequentialSampler",
"megengine.data.transform.Normalize",
"megengine.data.transform.ToMode",
"megengine.functional.accuracy",
"megengine.data.dataset.ImageNet",
"megengine.quantization.quantize",
"megengine.distributed.all_reduce_sum"
] |
[((909, 933), 'megengine.get_logger', 'mge.get_logger', (['__name__'], {}), '(__name__)\n', (923, 933), True, 'import megengine as mge\n'), ((961, 986), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (984, 986), False, 'import argparse\n'), ((3464, 3488), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (3473, 3488), True, 'import megengine.jit as jit\n'), ((4127, 4172), 'megengine.data.dataset.ImageNet', 'data.dataset.ImageNet', (['args.data'], {'train': '(False)'}), '(args.data, train=False)\n', (4148, 4172), True, 'import megengine.data as data\n'), ((4193, 4263), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['valid_dataset'], {'batch_size': '(100)', 'drop_last': '(False)'}), '(valid_dataset, batch_size=100, drop_last=False)\n', (4215, 4263), True, 'import megengine.data as data\n'), ((4919, 4930), 'time.time', 'time.time', ([], {}), '()\n', (4928, 4930), False, 'import time\n'), ((1896, 1923), 'megengine.get_device_count', 'mge.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (1916, 1923), True, 'import megengine as mge\n'), ((2079, 2109), 'megengine.set_default_device', 'mge.set_default_device', (['"""cpux"""'], {}), "('cpux')\n", (2101, 2109), True, 'import megengine as mge\n'), ((2257, 2285), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2276, 2285), True, 'import multiprocessing as mp\n'), ((2808, 2921), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (2831, 2921), True, 'import megengine.distributed as dist\n'), ((3070, 3116), 'megengine.quantization.quantize_qat', 'Q.quantize_qat', (['model', 'Q.ema_fakequant_qconfig'], {}), '(model, Q.ema_fakequant_qconfig)\n', (3084, 3116), True, 
'import megengine.quantization as Q\n'), ((3229, 3254), 'megengine.load', 'mge.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3237, 3254), True, 'import megengine as mge\n'), ((3415, 3432), 'megengine.quantization.quantize', 'Q.quantize', (['model'], {}), '(model)\n', (3425, 3432), True, 'import megengine.quantization as Q\n'), ((3589, 3650), 'megengine.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (3617, 3650), True, 'import megengine.functional as F\n'), ((3672, 3705), 'megengine.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (3682, 3705), True, 'import megengine.functional as F\n'), ((3717, 3738), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (3736, 3738), True, 'import megengine.distributed as dist\n'), ((5358, 5369), 'time.time', 'time.time', ([], {}), '()\n', (5367, 5369), False, 'import time\n'), ((2364, 2420), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (2374, 2420), True, 'import multiprocessing as mp\n'), ((3778, 3817), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss', '"""valid_loss"""'], {}), "(loss, 'valid_loss')\n", (3797, 3817), True, 'import megengine.distributed as dist\n'), ((3820, 3841), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3839, 3841), True, 'import megengine.distributed as dist\n'), ((3861, 3900), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1', '"""valid_acc1"""'], {}), "(acc1, 'valid_acc1')\n", (3880, 3900), True, 'import megengine.distributed as dist\n'), ((3903, 3924), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3922, 3924), True, 'import megengine.distributed as dist\n'), ((3944, 3983), 
'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5', '"""valid_acc5"""'], {}), "(acc5, 'valid_acc5')\n", (3963, 3983), True, 'import megengine.distributed as dist\n'), ((3986, 4007), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (4005, 4007), True, 'import megengine.distributed as dist\n'), ((5329, 5340), 'time.time', 'time.time', ([], {}), '()\n', (5338, 5340), False, 'import time\n'), ((5415, 5430), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5428, 5430), True, 'import megengine.distributed as dist\n'), ((4426, 4439), 'megengine.data.transform.Resize', 'T.Resize', (['(256)'], {}), '(256)\n', (4434, 4439), True, 'import megengine.data.transform as T\n'), ((4457, 4474), 'megengine.data.transform.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (4469, 4474), True, 'import megengine.data.transform as T\n'), ((4492, 4513), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': '(128)'}), '(mean=128)\n', (4503, 4513), True, 'import megengine.data.transform as T\n'), ((4531, 4546), 'megengine.data.transform.ToMode', 'T.ToMode', (['"""CHW"""'], {}), "('CHW')\n", (4539, 4546), True, 'import megengine.data.transform as T\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import json
import multiprocessing as mp
import os
import pathlib
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.engine import ClsTester
from basecls.models import build_model, load_model
from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger
def make_parser() -> argparse.ArgumentParser:
    """Create the command-line parser for the batch-testing script.

    Returns:
        An ``argparse.ArgumentParser`` accepting ``-d/--dir``.
    """
    p = argparse.ArgumentParser()
    p.add_argument("-d", "--dir", type=str, help="testing directory")
    return p
@logger.catch
def worker(args: argparse.Namespace):
    """Worker function for testing script.

    Scans ``args.dir`` recursively for config ``*.py`` files; for each one
    with a sibling ``*.pkl`` weight file, builds a tester, runs it, and
    finally writes all accuracies to ``result.json``.

    Args:
        args: args for testing script.
    """
    logger.info(f"Init process group for gpu{dist.get_rank()} done")
    args.dir = os.path.abspath(args.dir)
    setup_logger(args.dir, "test_all_log.txt", to_loguru=True)
    logger.info(f"args: {args}")
    result = dict()
    for f in pathlib.Path(args.dir).glob("**/*.py"):
        # make the config module importable by its bare file name
        sys.path.append(os.path.dirname(f))
        module_name = os.path.splitext(os.path.basename(f))[0]
        current_network = importlib.import_module(module_name)
        cfg = current_network.Cfg()
        # weights are expected next to the config, same stem, ".pkl" suffix
        weight_path = f"{os.path.splitext(f)[0]}.pkl"
        if os.path.isfile(weight_path):
            cfg.weights = weight_path
        else:
            # no weights for this config: undo the sys.path push and skip
            sys.path.pop(-1)
            continue
        cfg.set_mode("freeze")
        if cfg.fastrun:
            logger.info("Using fastrun mode...")
            mge.functional.debug_param.set_execution_strategy("PROFILE")
        tester = build(cfg)
        acc1, acc5 = tester.test()
        result[module_name] = dict(acc1=acc1, acc5=acc5)
        # keep sys.path balanced for the next config file
        sys.path.pop(-1)
    logger.info(json.dumps(result, indent=4))
    with open("result.json", "w") as f:
        json.dump(result, f)
def build(cfg: ConfigDict):
    """Assemble model, weights and dataloader into a tester.

    Args:
        cfg: config for testing.

    Returns:
        A tester.
    """
    net = build_model(cfg)
    load_model(net, cfg.weights)
    net.eval()

    default_logging(cfg, net)

    loader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
    # FIXME: need atomic user_pop, maybe in MegEngine 1.5?
    # tester = BaseTester(net, loader, AccEvaluator())
    return ClsTester(cfg, net, loader)
def main():
    """Main function for testing script.

    Parses arguments, validates the target directory, then runs the worker
    on CPU, a single GPU, or via the distributed launcher for multiple GPUs.
    """
    parser = make_parser()
    args = parser.parse_args()

    mp.set_start_method("spawn")

    set_nccl_env()
    set_num_threads()

    # ``--dir`` is optional in the parser (defaults to None); guard before
    # touching the filesystem, since ``os.path.exists(None)`` raises TypeError.
    if args.dir is None or not os.path.exists(args.dir):
        raise ValueError("Directory does not exist")

    device_count = mge.device.get_device_count("gpu")
    if device_count == 0:
        logger.warning("No GPU was found, testing on CPU")
        worker(args)
    elif device_count > 1:
        # fan out one process per GPU
        mp_worker = dist.launcher(worker)
        mp_worker(args)
    else:
        worker(args)
# Script entry point.
if __name__ == "__main__":
    main()
|
[
"megengine.distributed.get_rank",
"megengine.device.get_device_count",
"megengine.functional.debug_param.set_execution_strategy",
"megengine.distributed.launcher"
] |
[((659, 684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (682, 684), False, 'import argparse\n'), ((1017, 1042), 'os.path.abspath', 'os.path.abspath', (['args.dir'], {}), '(args.dir)\n', (1032, 1042), False, 'import os\n'), ((1047, 1105), 'basecls.utils.setup_logger', 'setup_logger', (['args.dir', '"""test_all_log.txt"""'], {'to_loguru': '(True)'}), "(args.dir, 'test_all_log.txt', to_loguru=True)\n", (1059, 1105), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((1110, 1138), 'loguru.logger.info', 'logger.info', (['f"""args: {args}"""'], {}), "(f'args: {args}')\n", (1121, 1138), False, 'from loguru import logger\n'), ((2228, 2244), 'basecls.models.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2239, 2244), False, 'from basecls.models import build_model, load_model\n'), ((2249, 2279), 'basecls.models.load_model', 'load_model', (['model', 'cfg.weights'], {}), '(model, cfg.weights)\n', (2259, 2279), False, 'from basecls.models import build_model, load_model\n'), ((2302, 2329), 'basecls.utils.default_logging', 'default_logging', (['cfg', 'model'], {}), '(cfg, model)\n', (2317, 2329), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2538, 2571), 'basecls.engine.ClsTester', 'ClsTester', (['cfg', 'model', 'dataloader'], {}), '(cfg, model, dataloader)\n', (2547, 2571), False, 'from basecls.engine import ClsTester\n'), ((2693, 2721), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (2712, 2721), True, 'import multiprocessing as mp\n'), ((2727, 2741), 'basecls.utils.set_nccl_env', 'set_nccl_env', ([], {}), '()\n', (2739, 2741), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2746, 2763), 'basecls.utils.set_num_threads', 'set_num_threads', ([], {}), '()\n', (2761, 2763), False, 'from basecls.utils 
import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((2875, 2909), 'megengine.device.get_device_count', 'mge.device.get_device_count', (['"""gpu"""'], {}), "('gpu')\n", (2902, 2909), True, 'import megengine as mge\n'), ((1347, 1383), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1370, 1383), False, 'import importlib\n'), ((1486, 1513), 'os.path.isfile', 'os.path.isfile', (['weight_path'], {}), '(weight_path)\n', (1500, 1513), False, 'import os\n'), ((1927, 1943), 'sys.path.pop', 'sys.path.pop', (['(-1)'], {}), '(-1)\n', (1939, 1943), False, 'import sys\n'), ((1961, 1989), 'json.dumps', 'json.dumps', (['result'], {'indent': '(4)'}), '(result, indent=4)\n', (1971, 1989), False, 'import json\n'), ((2039, 2059), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (2048, 2059), False, 'import json\n'), ((2776, 2800), 'os.path.exists', 'os.path.exists', (['args.dir'], {}), '(args.dir)\n', (2790, 2800), False, 'import os\n'), ((2945, 2995), 'loguru.logger.warning', 'logger.warning', (['"""No GPU was found, testing on CPU"""'], {}), "('No GPU was found, testing on CPU')\n", (2959, 2995), False, 'from loguru import logger\n'), ((1173, 1195), 'pathlib.Path', 'pathlib.Path', (['args.dir'], {}), '(args.dir)\n', (1185, 1195), False, 'import pathlib\n'), ((1237, 1255), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (1252, 1255), False, 'import os\n'), ((1579, 1595), 'sys.path.pop', 'sys.path.pop', (['(-1)'], {}), '(-1)\n', (1591, 1595), False, 'import sys\n'), ((1686, 1722), 'loguru.logger.info', 'logger.info', (['"""Using fastrun mode..."""'], {}), "('Using fastrun mode...')\n", (1697, 1722), False, 'from loguru import logger\n'), ((1735, 1795), 'megengine.functional.debug_param.set_execution_strategy', 'mge.functional.debug_param.set_execution_strategy', (['"""PROFILE"""'], {}), "('PROFILE')\n", (1784, 1795), True, 'import megengine as mge\n'), ((2348, 2388), 
'basecls.utils.registers.dataloaders.get', 'registers.dataloaders.get', (['cfg.data.name'], {}), '(cfg.data.name)\n', (2373, 2388), False, 'from basecls.utils import default_logging, registers, set_nccl_env, set_num_threads, setup_logger\n'), ((3064, 3085), 'megengine.distributed.launcher', 'dist.launcher', (['worker'], {}), '(worker)\n', (3077, 3085), True, 'import megengine.distributed as dist\n'), ((978, 993), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (991, 993), True, 'import megengine.distributed as dist\n'), ((1297, 1316), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (1313, 1316), False, 'import os\n'), ((1446, 1465), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1462, 1465), False, 'import os\n')]
|
import logging
import os
import pickle
import numpy as np
import h5py
from megengine.data import DataLoader
from megengine.data.dataset import Dataset
from megengine.data.sampler import RandomSampler, SequentialSampler
import megengine.distributed as dist
from dataset.transformations import fetch_transform
from common import utils
_logger = logging.getLogger(__name__)
class ModelNetNpy(Dataset):
def __init__(self, dataset_path: str, dataset_mode: str, subset: str = "train", categories=None, transform=None):
self._logger = logging.getLogger(self.__class__.__name__)
self._root = dataset_path
self._subset = subset
self._is_master = dist.get_rank() == 0
metadata_fpath = os.path.join(self._root, "modelnet_{}_{}.pickle".format(dataset_mode, subset))
utils.master_logger(self._logger, "Loading data from {} for {}".format(metadata_fpath, subset), self._is_master)
if not os.path.exists(os.path.join(dataset_path)):
assert FileNotFoundError("Not found dataset_path: {}".format(dataset_path))
with open(os.path.join(dataset_path, "shape_names.txt")) as fid:
self._classes = [l.strip() for l in fid]
self._category2idx = {e[1]: e[0] for e in enumerate(self._classes)}
self._idx2category = self._classes
if categories is not None:
categories_idx = [self._category2idx[c] for c in categories]
utils.master_logger(self._logger, "Categories used: {}.".format(categories_idx), self._is_master)
self._classes = categories
else:
categories_idx = None
utils.master_logger(self._logger, "Using all categories.", self._is_master)
self._data = self._read_pickle_files(os.path.join(dataset_path, "modelnet_{}_{}.pickle".format(dataset_mode, subset)),
categories_idx)
self._transform = transform
utils.master_logger(self._logger, "Loaded {} {} instances.".format(len(self._data), subset), self._is_master)
@property
def classes(self):
return self._classes
@staticmethod
def _read_pickle_files(fnames, categories):
all_data_dict = []
with open(fnames, "rb") as f:
data = pickle.load(f)
for category in categories:
all_data_dict.extend(data[category])
return all_data_dict
def to_category(self, i):
return self._idx2category[i]
def __getitem__(self, item):
data_path = self._data[item]
# load and process data
points = np.load(data_path)
idx = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[1]))
label = np.array(int(os.path.splitext(os.path.basename(data_path))[0].split("_")[3]))
sample = {"points": points, "label": label, "idx": idx}
if self._transform:
sample = self._transform(sample)
return sample
def __len__(self):
    """Number of samples in this split."""
    return len(self._data)
def fetch_dataloader(params):
    """Build the train/val/test dataloaders described by ``params``.

    Args:
        params: config object providing ``dataset_type``, ``transform_type``,
            ``train_batch_size``, ``eval_batch_size``, ``num_workers`` and
            ``eval_type`` attributes.

    Returns:
        dict: keys ``"train"``, ``"val"``, ``"test"``.  The ``"train"`` loader
        is always built; an evaluation split not listed in ``params.eval_type``
        maps to ``None``.

    Raises:
        ValueError: if ``params.dataset_type`` is not one of
            ``"modelnet_os"`` / ``"modelnet_ts"``.
    """
    utils.master_logger(
        _logger,
        "Dataset type: {}, transform type: {}".format(params.dataset_type, params.transform_type),
        dist.get_rank() == 0,
    )
    train_transforms, test_transforms = fetch_transform(params)

    # The two dataset flavours differ only in path and mode; the category
    # split files are shared.
    if params.dataset_type == "modelnet_os":
        dataset_path = "./dataset/data/modelnet_os"
        dataset_mode = "os"
    elif params.dataset_type == "modelnet_ts":
        dataset_path = "./dataset/data/modelnet_ts"
        dataset_mode = "ts"
    else:
        # BUGFIX: previously fell through silently and crashed later with a
        # NameError on ``train_ds``.
        raise ValueError("Unknown dataset_type: {}".format(params.dataset_type))

    def _read_categories(fname):
        # one category name per line; the previous version leaked the file
        # handle and sorted in a separate statement
        with open(fname) as f:
            return sorted(line.rstrip("\n") for line in f)

    train_categories = _read_categories("./dataset/data/modelnet40_half1_rm_rotate.txt")
    val_categories = _read_categories("./dataset/data/modelnet40_half1_rm_rotate.txt")
    test_categories = _read_categories("./dataset/data/modelnet40_half2_rm_rotate.txt")

    train_ds = ModelNetNpy(dataset_path, dataset_mode=dataset_mode, subset="train",
                            categories=train_categories, transform=train_transforms)
    val_ds = ModelNetNpy(dataset_path, dataset_mode=dataset_mode, subset="val",
                          categories=val_categories, transform=test_transforms)
    test_ds = ModelNetNpy(dataset_path, dataset_mode=dataset_mode, subset="test",
                           categories=test_categories, transform=test_transforms)

    dataloaders = {}
    # default train data loader
    train_sampler = RandomSampler(train_ds, batch_size=params.train_batch_size, drop_last=True)
    dataloaders["train"] = DataLoader(train_ds, train_sampler, num_workers=params.num_workers)

    # choose val and/or test data loaders for evaluation
    for split, ds in (("val", val_ds), ("test", test_ds)):
        if split in params.eval_type:
            sampler = SequentialSampler(ds, batch_size=params.eval_batch_size)
            dataloaders[split] = DataLoader(ds, sampler, num_workers=params.num_workers)
        else:
            dataloaders[split] = None
    return dataloaders
|
[
"megengine.data.DataLoader",
"megengine.data.sampler.RandomSampler",
"megengine.data.sampler.SequentialSampler",
"megengine.distributed.get_rank"
] |
[((347, 374), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (364, 374), False, 'import logging\n'), ((3269, 3292), 'dataset.transformations.fetch_transform', 'fetch_transform', (['params'], {}), '(params)\n', (3284, 3292), False, 'from dataset.transformations import fetch_transform\n'), ((5226, 5301), 'megengine.data.sampler.RandomSampler', 'RandomSampler', (['train_ds'], {'batch_size': 'params.train_batch_size', 'drop_last': '(True)'}), '(train_ds, batch_size=params.train_batch_size, drop_last=True)\n', (5239, 5301), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5317, 5384), 'megengine.data.DataLoader', 'DataLoader', (['train_ds', 'train_sampler'], {'num_workers': 'params.num_workers'}), '(train_ds, train_sampler, num_workers=params.num_workers)\n', (5327, 5384), False, 'from megengine.data import DataLoader\n'), ((546, 588), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (563, 588), False, 'import logging\n'), ((2606, 2624), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2613, 2624), True, 'import numpy as np\n'), ((679, 694), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (692, 694), True, 'import megengine.distributed as dist\n'), ((1646, 1721), 'common.utils.master_logger', 'utils.master_logger', (['self._logger', '"""Using all categories."""', 'self._is_master'], {}), "(self._logger, 'Using all categories.', self._is_master)\n", (1665, 1721), False, 'from common import utils\n'), ((2285, 2299), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2296, 2299), False, 'import pickle\n'), ((3206, 3221), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3219, 3221), True, 'import megengine.distributed as dist\n'), ((957, 983), 'os.path.join', 'os.path.join', (['dataset_path'], {}), '(dataset_path)\n', (969, 983), False, 'import os\n'), ((1093, 1138), 'os.path.join', 
'os.path.join', (['dataset_path', '"""shape_names.txt"""'], {}), "(dataset_path, 'shape_names.txt')\n", (1105, 1138), False, 'import os\n'), ((5605, 5665), 'megengine.data.sampler.SequentialSampler', 'SequentialSampler', (['val_ds'], {'batch_size': 'params.eval_batch_size'}), '(val_ds, batch_size=params.eval_batch_size)\n', (5622, 5665), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5687, 5750), 'megengine.data.DataLoader', 'DataLoader', (['val_ds', 'val_sampler'], {'num_workers': 'params.num_workers'}), '(val_ds, val_sampler, num_workers=params.num_workers)\n', (5697, 5750), False, 'from megengine.data import DataLoader\n'), ((5816, 5877), 'megengine.data.sampler.SequentialSampler', 'SequentialSampler', (['test_ds'], {'batch_size': 'params.eval_batch_size'}), '(test_ds, batch_size=params.eval_batch_size)\n', (5833, 5877), False, 'from megengine.data.sampler import RandomSampler, SequentialSampler\n'), ((5899, 5964), 'megengine.data.DataLoader', 'DataLoader', (['test_ds', 'test_sampler'], {'num_workers': 'params.num_workers'}), '(test_ds, test_sampler, num_workers=params.num_workers)\n', (5909, 5964), False, 'from megengine.data import DataLoader\n'), ((2669, 2696), 'os.path.basename', 'os.path.basename', (['data_path'], {}), '(data_path)\n', (2685, 2696), False, 'import os\n'), ((2763, 2790), 'os.path.basename', 'os.path.basename', (['data_path'], {}), '(data_path)\n', (2779, 2790), False, 'import os\n')]
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr
from det_opr.utils import get_padded_tensor
import pdb
class Network(M.Module):
    """Two-stage detector: ResNet-50 backbone + FPN neck + RPN proposals +
    RCNN head, trained with EMD-style losses (see :class:`RCNN`).
    """

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable has no effect on
                # the module's stored parameters — conv1 is NOT actually
                # frozen by this loop; confirm the intended mechanism.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op rebinding as above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        self.RPN = RPN(config.rpn_channel)
        # ----------------------- build the RCNN head ----------------------- #
        self.RCNN = RCNN()
        # -------------------------- input Tensor --------------------------- #
        # Random placeholder inputs matching the expected shapes —
        # presumably used for graph tracing/warm-up; confirm with callers.
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 5]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize images with the configured mean/std and pad the
        spatial dims up to a multiple of 64 (FPN stride alignment)."""
        mean = config.image_mean.reshape(1, 3, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, 3, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Dispatch to the train or test path based on ``self.training``."""
        images = inputs['image']
        im_info = inputs['im_info']
        gt_boxes = inputs['gt_boxes']
        #del images
        # process the images
        normed_images = self.pre_process(images)
        if self.training:
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Training path: returns a dict of RPN + RCNN losses."""
        loss_dict = {}
        # stride: 64,32,16,8,4, p6->p2
        fpn_fms = self.backbone(image)
        rpn_rois, loss_dict_rpn = \
            self.RPN(fpn_fms, im_info, gt_boxes)
        # assign each proposal its top-2 matched GT boxes (EMD training)
        rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
            rpn_rois, im_info, gt_boxes, top_k=2)
        loss_dict_rcnn = self.RCNN(
            fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
        loss_dict.update(loss_dict_rpn)
        loss_dict.update(loss_dict_rcnn)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Inference path: returns RCNN box predictions."""
        fpn_fms = self.backbone(image)
        rpn_rois = self.RPN(fpn_fms, im_info)
        pred_bbox = self.RCNN(fpn_fms, rpn_rois)
        return pred_bbox
class RCNN(M.Module):
    """RCNN head with two parallel prediction branches trained via an
    EMD-style loss, plus an optional refinement module that re-predicts
    boxes conditioned on the first-stage outputs.
    """

    def __init__(self):
        super().__init__()
        # roi head
        self.refinement = True
        self.fc1 = M.Linear(256 * 7 * 7, 1024)
        self.fc2 = M.Linear(1024, 1024)
        # 1054 = 1024 (fc2 features) + 30 (6 x 5 flattened box+score preds)
        self.fc3 = M.Linear(1054, 1024) if self.refinement else None
        self.relu = M.ReLU()
        self.n = config.num_classes
        # two parallel branches; each emits 4 deltas + 1 score per class
        self.a = M.Linear(1024, 5 * self.n)
        self.b = M.Linear(1024, 5 * self.n)
        # refinement counterparts of a/b
        self.q = M.Linear(1024, 5 * self.n) if self.refinement else None
        self.r = M.Linear(1024, 5 * self.n) if self.refinement else None
        self._init_weights()

    def _init_weights(self,):
        """Gaussian (std=0.01) weight init, zero bias, for all heads."""
        for l in [self.fc1, self.fc2, self.a, self.b]:
            M.init.normal_(l.weight, std=0.01)
            M.init.fill_(l.bias, 0)
        if self.refinement:
            for l in [self.q, self.r, self.fc3]:
                M.init.normal_(l.weight, std=0.01)
                M.init.fill_(l.bias, 0)

    def refinement_module(self, prob, fc2):
        """Re-predict the two-branch outputs conditioned on the first-stage
        predictions concatenated to the fc2 features."""
        m = prob.reshape(-1, 5 * self.n)
        offsets, scores = m[:, :-self.n], m[:, -self.n:]
        n = offsets.shape[0]
        offsets = offsets.reshape(-1, self.n, 4)
        cls_scores = F.expand_dims(F.softmax(scores, axis=1), axis=2)
        # keep only the foreground class (index 1) box+score predictions
        pred_boxes = F.concat([offsets, cls_scores], axis=2)[:, 1]
        n, c = pred_boxes.shape
        pred_boxes = F.broadcast_to(F.expand_dims(pred_boxes, axis=1), (n, 6, c)).reshape(n, -1)
        n, c = fc2.shape
        fc3 = F.broadcast_to(F.expand_dims(fc2, axis=1), (n, 2, c)).reshape(-1, c)
        fc3 = F.concat([fc3, pred_boxes], axis=1)
        fc3 = self.relu(self.fc3(fc3))
        fc3 = fc3.reshape(n, 2, -1).transpose(1, 0, 2)
        a = self.q(fc3[0])
        b = self.r(fc3[1])
        prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
        return prob

    def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
        """Training: return a loss dict.  Inference: return per-roi boxes
        concatenated with class probabilities."""
        # stride: 64,32,16,8,4 -> 4, 8, 16, 32
        fpn_fms = fpn_fms[1:]
        fpn_fms.reverse()
        stride = [4, 8, 16, 32]
        poo5, rcnn_rois, labels, bbox_targets = roi_pool(
            fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
            labels, bbox_targets)
        poo5 = F.flatten(poo5, start_axis=1)
        fc1 = F.relu(self.fc1(poo5))
        fc2 = F.relu(self.fc2(fc1))
        a = self.a(fc2)
        b = self.b(fc2)
        prob = F.stack([a, b], axis=1).reshape(-1, a.shape[1])
        if self.refinement:
            final_prob = self.refinement_module(prob, fc2)
        if self.training:
            emd_loss = self.compute_gemini_loss(prob, bbox_targets, labels)
            loss_dict = {}
            loss_dict['loss_rcnn_emd'] = emd_loss
            # BUGFIX: was ``if self.refinement_module:`` — a bound method is
            # always truthy, so with refinement disabled this branch would
            # run and raise a NameError on ``final_prob``.  Test the flag.
            if self.refinement:
                final_emd_loss = self.compute_gemini_loss(final_prob, bbox_targets, labels)
                loss_dict['final_rcnn_emd'] = final_emd_loss
            return loss_dict
        else:
            # NOTE(review): inference decodes the first-stage ``prob`` even
            # when the refinement head is enabled — confirm this is intended.
            offsets, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
            pred_bbox = offsets.reshape(-1, self.n, 4)
            cls_prob = F.softmax(cls_scores, axis=1)
            n = rcnn_rois.shape[0]
            rois = F.broadcast_to(F.expand_dims(rcnn_rois[:, 1:5], axis=1), (n, 2, 4)).reshape(-1, 4)
            normalized = config.rcnn_bbox_normalize_targets
            pred_boxes = restore_bbox(rois, pred_bbox, normalized, config)
            pred_bbox = F.concat([pred_boxes, F.expand_dims(cls_prob, axis=2)], axis=2)
            return pred_bbox

    def compute_emd_loss(self, a, b, bbox_targets, labels):
        """EMD loss for one fixed ordering (branch a -> target 0,
        branch b -> target 1); summed per roi pair."""
        c = a.shape[1]
        prob = F.stack([a, b], axis=1).reshape(-1, c)
        pred_bbox, cls_scores = prob[:, :-self.n], prob[:, -self.n:]
        n, c = bbox_targets.shape[0], bbox_targets.shape[1]
        bbox_targets, labels = bbox_targets.reshape(-1, 4), labels.flatten()
        cls_loss = softmax_loss_opr(cls_scores, labels)
        pred_bbox = pred_bbox.reshape(-1, self.n, 4)
        rcnn_bbox_loss = smooth_l1_loss_rcnn_opr(pred_bbox, bbox_targets, labels,
            config.rcnn_smooth_l1_beta)
        loss = cls_loss + rcnn_bbox_loss
        loss = loss.reshape(-1, 2).sum(axis=1)
        return loss

    def compute_gemini_loss(self, prob, bbox_targets, labels):
        """Take the minimum of the two possible branch-to-target
        assignments (the EMD criterion), normalized by the number of
        rois with two valid targets."""
        c = prob.shape[1]
        prob = prob.reshape(-1, 2, c).transpose(1, 0, 2)
        a, b = prob[0], prob[1]
        loss0 = self.compute_emd_loss(a, b, bbox_targets, labels)
        loss1 = self.compute_emd_loss(b, a, bbox_targets, labels)
        loss = F.stack([loss0, loss1], axis=1)
        vlabel = (labels > -1).reshape(-1, 2).sum(axis=1) > 1
        emd_loss = loss.min(axis=1).sum() / F.maximum(vlabel.sum(), 1)
        return emd_loss
class FPN(M.Module):
    """Feature Pyramid Network on top of a bottom-up backbone.

    Consumes the backbone's four feature maps (256/512/1024/2048 channels)
    and returns pyramid levels ordered from coarsest (p6) to finest.
    """

    def __init__(self, bottom_up):
        super(FPN, self).__init__()
        fpn_dim = 256
        use_bias = True
        lateral_convs = []
        output_convs = []
        for channels in [256, 512, 1024, 2048]:
            lateral = M.Conv2d(
                channels, fpn_dim, kernel_size=1, bias=use_bias)
            output = M.Conv2d(
                fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
            M.init.msra_normal_(lateral.weight, mode="fan_in")
            M.init.msra_normal_(output.weight, mode="fan_in")
            if use_bias:
                M.init.fill_(lateral.bias, 0)
                M.init.fill_(output.bias, 0)
            lateral_convs.append(lateral)
            output_convs.append(output)
        # store high-level (2048-ch) first to match the reversed backbone
        # outputs consumed in forward()
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]
        self.bottom_up = bottom_up

    def forward(self, x):
        feats = self.bottom_up(x)[::-1]  # coarsest level first
        prev = self.lateral_convs[0](feats[0])
        results = [self.output_convs[0](prev)]
        for feat, lateral_conv, output_conv in zip(
            feats[1:], self.lateral_convs[1:], self.output_convs[1:]
        ):
            fh, fw = feat.shape[2:]
            # upsample the coarser map and merge with the lateral branch
            top_down = F.nn.interpolate(
                prev, size=(fh, fw), mode="BILINEAR")
            prev = lateral_conv(feat) + top_down
            results.append(output_conv(prev))
        # p6: extra coarse level by stride-2 subsampling of the coarsest map
        results.insert(0, F.max_pool2d(results[0], kernel_size=1, stride=2, padding=0))
        return results
|
[
"megengine.module.ReLU",
"megengine.functional.softmax",
"megengine.functional.flatten",
"megengine.functional.stack",
"megengine.tensor",
"megengine.module.init.msra_normal_",
"megengine.functional.max_pool2d",
"megengine.functional.concat",
"megengine.module.init.normal_",
"megengine.module.init.fill_",
"megengine.module.Conv2d",
"megengine.module.Linear",
"megengine.functional.expand_dims",
"megengine.functional.nn.interpolate"
] |
[((656, 666), 'backbone.resnet50.ResNet50', 'ResNet50', ([], {}), '()\n', (664, 666), False, 'from backbone.resnet50 import ResNet50\n'), ((1312, 1335), 'module.rpn.RPN', 'RPN', (['config.rpn_channel'], {}), '(config.rpn_channel)\n', (1315, 1335), False, 'from module.rpn import RPN\n'), ((2306, 2342), 'det_opr.utils.get_padded_tensor', 'get_padded_tensor', (['normed_images', '(64)'], {}), '(normed_images, 64)\n', (2323, 2342), False, 'from det_opr.utils import get_padded_tensor\n'), ((3081, 3133), 'det_opr.fpn_roi_target.fpn_roi_target', 'fpn_roi_target', (['rpn_rois', 'im_info', 'gt_boxes'], {'top_k': '(2)'}), '(rpn_rois, im_info, gt_boxes, top_k=2)\n', (3095, 3133), False, 'from det_opr.fpn_roi_target import fpn_roi_target\n'), ((3724, 3751), 'megengine.module.Linear', 'M.Linear', (['(256 * 7 * 7)', '(1024)'], {}), '(256 * 7 * 7, 1024)\n', (3732, 3751), True, 'import megengine.module as M\n'), ((3767, 3787), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (3775, 3787), True, 'import megengine.module as M\n'), ((3878, 3886), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (3884, 3886), True, 'import megengine.module as M\n'), ((3941, 3967), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (3949, 3967), True, 'import megengine.module as M\n'), ((3985, 4011), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (3993, 4011), True, 'import megengine.module as M\n'), ((5155, 5190), 'megengine.functional.concat', 'F.concat', (['[fc3, pred_boxes]'], {'axis': '(1)'}), '([fc3, pred_boxes], axis=1)\n', (5163, 5190), True, 'import megengine.functional as F\n'), ((5690, 5769), 'layers.roi_pool.roi_pool', 'roi_pool', (['fpn_fms', 'rcnn_rois', 'stride', '(7, 7)', '"""roi_align"""', 'labels', 'bbox_targets'], {}), "(fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align', labels, bbox_targets)\n", (5698, 5769), False, 'from layers.roi_pool import 
roi_pool\n'), ((5818, 5847), 'megengine.functional.flatten', 'F.flatten', (['poo5'], {'start_axis': '(1)'}), '(poo5, start_axis=1)\n', (5827, 5847), True, 'import megengine.functional as F\n'), ((7502, 7538), 'det_opr.loss_opr.softmax_loss_opr', 'softmax_loss_opr', (['cls_scores', 'labels'], {}), '(cls_scores, labels)\n', (7518, 7538), False, 'from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr\n'), ((7617, 7706), 'det_opr.loss_opr.smooth_l1_loss_rcnn_opr', 'smooth_l1_loss_rcnn_opr', (['pred_bbox', 'bbox_targets', 'labels', 'config.rcnn_smooth_l1_beta'], {}), '(pred_bbox, bbox_targets, labels, config.\n rcnn_smooth_l1_beta)\n', (7640, 7706), False, 'from det_opr.loss_opr import softmax_loss_opr, smooth_l1_loss_rcnn_opr\n'), ((8149, 8180), 'megengine.functional.stack', 'F.stack', (['[loss0, loss1]'], {'axis': '(1)'}), '([loss0, loss1], axis=1)\n', (8156, 8180), True, 'import megengine.functional as F\n'), ((10329, 10389), 'megengine.functional.max_pool2d', 'F.max_pool2d', (['results[0]'], {'kernel_size': '(1)', 'stride': '(2)', 'padding': '(0)'}), '(results[0], kernel_size=1, stride=2, padding=0)\n', (10341, 10389), True, 'import megengine.functional as F\n'), ((3807, 3827), 'megengine.module.Linear', 'M.Linear', (['(1054)', '(1024)'], {}), '(1054, 1024)\n', (3815, 3827), True, 'import megengine.module as M\n'), ((4030, 4056), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (4038, 4056), True, 'import megengine.module as M\n'), ((4103, 4129), 'megengine.module.Linear', 'M.Linear', (['(1024)', '(5 * self.n)'], {}), '(1024, 5 * self.n)\n', (4111, 4129), True, 'import megengine.module as M\n'), ((4295, 4329), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (4309, 4329), True, 'import megengine.module as M\n'), ((4342, 4365), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (4354, 4365), True, 'import 
megengine.module as M\n'), ((4802, 4827), 'megengine.functional.softmax', 'F.softmax', (['scores'], {'axis': '(1)'}), '(scores, axis=1)\n', (4811, 4827), True, 'import megengine.functional as F\n'), ((4858, 4897), 'megengine.functional.concat', 'F.concat', (['[offsets, cls_scores]'], {'axis': '(2)'}), '([offsets, cls_scores], axis=2)\n', (4866, 4897), True, 'import megengine.functional as F\n'), ((6714, 6743), 'megengine.functional.softmax', 'F.softmax', (['cls_scores'], {'axis': '(1)'}), '(cls_scores, axis=1)\n', (6723, 6743), True, 'import megengine.functional as F\n'), ((6966, 7015), 'det_opr.bbox_opr.restore_bbox', 'restore_bbox', (['rois', 'pred_bbox', 'normalized', 'config'], {}), '(rois, pred_bbox, normalized, config)\n', (6978, 7015), False, 'from det_opr.bbox_opr import bbox_transform_inv_opr, restore_bbox\n'), ((8856, 8916), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'fpn_dim'], {'kernel_size': '(1)', 'bias': 'use_bias'}), '(in_channels, fpn_dim, kernel_size=1, bias=use_bias)\n', (8864, 8916), True, 'import megengine.module as M\n'), ((8960, 9037), 'megengine.module.Conv2d', 'M.Conv2d', (['fpn_dim', 'fpn_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': 'use_bias'}), '(fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)\n', (8968, 9037), True, 'import megengine.module as M\n'), ((9067, 9122), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['lateral_conv.weight'], {'mode': '"""fan_in"""'}), "(lateral_conv.weight, mode='fan_in')\n", (9086, 9122), True, 'import megengine.module as M\n'), ((9135, 9189), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['output_conv.weight'], {'mode': '"""fan_in"""'}), "(output_conv.weight, mode='fan_in')\n", (9154, 9189), True, 'import megengine.module as M\n'), ((10041, 10104), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['prev_features'], {'size': '(fh, fw)', 'mode': '"""BILINEAR"""'}), "(prev_features, size=(fh, fw), 
mode='BILINEAR')\n", (10057, 10104), True, 'import megengine.functional as F\n'), ((2011, 2048), 'config.config.image_mean.reshape', 'config.image_mean.reshape', (['(1)', '(3)', '(1)', '(1)'], {}), '(1, 3, 1, 1)\n', (2036, 2048), False, 'from config import config\n'), ((2082, 2118), 'config.config.image_std.reshape', 'config.image_std.reshape', (['(1)', '(3)', '(1)', '(1)'], {}), '(1, 3, 1, 1)\n', (2106, 2118), False, 'from config import config\n'), ((2153, 2169), 'megengine.tensor', 'mge.tensor', (['mean'], {}), '(mean)\n', (2163, 2169), True, 'import megengine as mge\n'), ((2202, 2217), 'megengine.tensor', 'mge.tensor', (['std'], {}), '(std)\n', (2212, 2217), True, 'import megengine as mge\n'), ((4460, 4494), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (4474, 4494), True, 'import megengine.module as M\n'), ((4511, 4534), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (4523, 4534), True, 'import megengine.module as M\n'), ((5363, 5386), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (5370, 5386), True, 'import megengine.functional as F\n'), ((5985, 6008), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (5992, 6008), True, 'import megengine.functional as F\n'), ((7237, 7260), 'megengine.functional.stack', 'F.stack', (['[a, b]'], {'axis': '(1)'}), '([a, b], axis=1)\n', (7244, 7260), True, 'import megengine.functional as F\n'), ((9231, 9265), 'megengine.module.init.fill_', 'M.init.fill_', (['lateral_conv.bias', '(0)'], {}), '(lateral_conv.bias, 0)\n', (9243, 9265), True, 'import megengine.module as M\n'), ((9282, 9315), 'megengine.module.init.fill_', 'M.init.fill_', (['output_conv.bias', '(0)'], {}), '(output_conv.bias, 0)\n', (9294, 9315), True, 'import megengine.module as M\n'), ((4972, 5005), 'megengine.functional.expand_dims', 'F.expand_dims', (['pred_boxes'], 
{'axis': '(1)'}), '(pred_boxes, axis=1)\n', (4985, 5005), True, 'import megengine.functional as F\n'), ((5087, 5113), 'megengine.functional.expand_dims', 'F.expand_dims', (['fc2'], {'axis': '(1)'}), '(fc2, axis=1)\n', (5100, 5113), True, 'import megengine.functional as F\n'), ((7062, 7093), 'megengine.functional.expand_dims', 'F.expand_dims', (['cls_prob'], {'axis': '(2)'}), '(cls_prob, axis=2)\n', (7075, 7093), True, 'import megengine.functional as F\n'), ((1596, 1630), 'numpy.random.random', 'np.random.random', (['[2, 3, 224, 224]'], {}), '([2, 3, 224, 224])\n', (1612, 1630), True, 'import numpy as np\n'), ((1734, 1758), 'numpy.random.random', 'np.random.random', (['[2, 5]'], {}), '([2, 5])\n', (1750, 1758), True, 'import numpy as np\n'), ((1863, 1892), 'numpy.random.random', 'np.random.random', (['[2, 100, 5]'], {}), '([2, 100, 5])\n', (1879, 1892), True, 'import numpy as np\n'), ((6813, 6853), 'megengine.functional.expand_dims', 'F.expand_dims', (['rcnn_rois[:, 1:5]'], {'axis': '(1)'}), '(rcnn_rois[:, 1:5], axis=1)\n', (6826, 6853), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools
import numpy as np
from megengine import Parameter, tensor
from megengine.module import AvgPool2d, MaxPool2d
def test_avg_pool2d():
    """Check ``AvgPool2d`` against a straightforward NumPy reference.

    The reference divides by ``kernel_size ** 2`` including padded zeros,
    matching ``mode="average"`` (count-include-pad semantics).
    """

    def test_func(
        batch_size,
        in_channels,
        out_channels,
        in_height,
        in_width,
        kernel_size,
        stride,
        padding,
    ):
        pool = AvgPool2d(kernel_size, stride=stride, padding=padding, mode="average")
        inp = np.random.normal(
            size=(batch_size, in_channels, in_height, in_width)
        ).astype(np.float32)
        out_height = (in_height + padding * 2 - kernel_size) // stride + 1
        out_width = (in_width + padding * 2 - kernel_size) // stride + 1
        out = pool(tensor(inp))
        inp = np.pad(inp, ((0, 0), (0, 0), (padding, padding), (padding, padding)))
        expected = np.zeros(
            (batch_size, out_channels, out_height, out_width), dtype=np.float32,
        )
        for n, c, oh, ow in itertools.product(
            *map(range, [batch_size, out_channels, out_height, out_width])
        ):
            ih, iw = oh * stride, ow * stride
            expected[n, c, oh, ow] = np.sum(
                inp[n, c, ih : ih + kernel_size, iw : iw + kernel_size]
            ) / (kernel_size * kernel_size)
        # BUGFIX: the old call passed 1e-5 as the integer *decimal* argument
        # of assert_almost_equal, i.e. a tolerance of ~1.5 * 10**(-1e-5) ≈ 1.5,
        # making the check nearly vacuous.  Use an explicit absolute tolerance.
        np.testing.assert_allclose(out.numpy(), expected, atol=1e-5)

    test_func(10, 4, 4, 5, 5, 2, 2, 1)
    test_func(10, 4, 4, 6, 6, 2, 2, 0)
    test_func(10, 16, 16, 14, 14, 2, 2, 0)
|
[
"megengine.module.AvgPool2d",
"megengine.tensor"
] |
[((725, 795), 'megengine.module.AvgPool2d', 'AvgPool2d', (['kernel_size'], {'stride': 'stride', 'padding': 'padding', 'mode': '"""average"""'}), "(kernel_size, stride=stride, padding=padding, mode='average')\n", (734, 795), False, 'from megengine.module import AvgPool2d, MaxPool2d\n'), ((1115, 1184), 'numpy.pad', 'np.pad', (['inp', '((0, 0), (0, 0), (padding, padding), (padding, padding))'], {}), '(inp, ((0, 0), (0, 0), (padding, padding), (padding, padding)))\n', (1121, 1184), True, 'import numpy as np\n'), ((1204, 1281), 'numpy.zeros', 'np.zeros', (['(batch_size, out_channels, out_height, out_width)'], {'dtype': 'np.float32'}), '((batch_size, out_channels, out_height, out_width), dtype=np.float32)\n', (1212, 1281), True, 'import numpy as np\n'), ((1088, 1099), 'megengine.tensor', 'tensor', (['inp'], {}), '(inp)\n', (1094, 1099), False, 'from megengine import Parameter, tensor\n'), ((810, 879), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, in_channels, in_height, in_width)'}), '(size=(batch_size, in_channels, in_height, in_width))\n', (826, 879), True, 'import numpy as np\n'), ((1521, 1580), 'numpy.sum', 'np.sum', (['inp[n, c, ih:ih + kernel_size, iw:iw + kernel_size]'], {}), '(inp[n, c, ih:ih + kernel_size, iw:iw + kernel_size])\n', (1527, 1580), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from tempfile import TemporaryFile
import numpy as np
from megengine.core import Buffer, Parameter, tensor
from megengine.test import assertTensorClose
def test_tensor_serialization():
    """Round-trip tensor, Parameter and Buffer through pickle and verify
    value, dtype, device, requires_grad and concrete type are preserved."""

    def tensor_eq(a, b):
        assert a.dtype == b.dtype
        assert a.device == b.device
        assert a.requires_grad == b.requires_grad
        assertTensorClose(a, b)

    def roundtrip(obj):
        # dump to a temporary file and load the object back
        with TemporaryFile() as f:
            pickle.dump(obj, f)
            f.seek(0)
            return pickle.load(f)

    data = np.random.randint(low=0, high=7, size=[233])
    a = tensor(data, device="xpux", dtype=np.int32)
    tensor_eq(a, roundtrip(a))

    a = Parameter(np.random.random(size=(233, 2)).astype(np.float32))
    b = roundtrip(a)
    assert isinstance(b, Parameter)
    tensor_eq(a, b)

    a = Buffer(np.random.random(size=(2, 233)).astype(np.float32))
    b = roundtrip(a)
    assert isinstance(b, Buffer)
    tensor_eq(a, b)
|
[
"megengine.core.tensor",
"megengine.test.assertTensorClose"
] |
[((733, 756), 'megengine.test.assertTensorClose', 'assertTensorClose', (['a', 'b'], {}), '(a, b)\n', (750, 756), False, 'from megengine.test import assertTensorClose\n'), ((767, 782), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (780, 782), False, 'from tempfile import TemporaryFile\n'), ((804, 848), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(7)', 'size': '[233]'}), '(low=0, high=7, size=[233])\n', (821, 848), True, 'import numpy as np\n'), ((861, 904), 'megengine.core.tensor', 'tensor', (['data'], {'device': '"""xpux"""', 'dtype': 'np.int32'}), "(data, device='xpux', dtype=np.int32)\n", (867, 904), False, 'from megengine.core import Buffer, Parameter, tensor\n'), ((913, 930), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (924, 930), False, 'import pickle\n'), ((961, 975), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (972, 975), False, 'import pickle\n'), ((1010, 1025), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1023, 1025), False, 'from tempfile import TemporaryFile\n'), ((1114, 1131), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (1125, 1131), False, 'import pickle\n'), ((1162, 1176), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1173, 1176), False, 'import pickle\n'), ((1251, 1266), 'tempfile.TemporaryFile', 'TemporaryFile', ([], {}), '()\n', (1264, 1266), False, 'from tempfile import TemporaryFile\n'), ((1352, 1369), 'pickle.dump', 'pickle.dump', (['a', 'f'], {}), '(a, f)\n', (1363, 1369), False, 'import pickle\n'), ((1400, 1414), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1411, 1414), False, 'import pickle\n'), ((1054, 1085), 'numpy.random.random', 'np.random.random', ([], {'size': '(233, 2)'}), '(size=(233, 2))\n', (1070, 1085), True, 'import numpy as np\n'), ((1292, 1323), 'numpy.random.random', 'np.random.random', ([], {'size': '(2, 233)'}), '(size=(2, 233))\n', (1308, 1323), True, 'import numpy as np\n')]
|
# Copyright (c) Megvii, Inc. and its affiliates.
import math
import megengine.functional as F
import megengine.module as M
class LogitsFullyConnected(M.Module):
    """Bias-free linear layer mapping embeddings to per-class logits.

    The weight rows are L2-normalized at forward time, so when the input
    embeddings are normalized too, the logits are cosine similarities.
    """

    def __init__(self, num_class, feature_dim):
        super().__init__()
        linear = M.Linear(feature_dim, num_class, bias=False)
        self.weight = linear.weight
        M.init.msra_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, embedding):
        # embedding has been normalized already by the caller
        normalized_weight = F.normalize(self.weight, axis=1)
        return F.matmul(embedding, normalized_weight.transpose(1, 0))
class AdditiveMarginSoftmax(M.Module):
    """Additive margin softmax (CosFace / AM-Softmax).

    See `"Additive Margin Softmax for Face Verification"
    <https://arxiv.org/pdf/1801.05599.pdf>`_ and
    `"CosFace: Large Margin Cosine Loss for Deep Face Recognition"
    <https://arxiv.org/pdf/1801.09414.pdf>`_.
    """

    def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
        assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveMarginSoftmax, got {m1}"
        assert m2 == 0.0, f"m2 expected to be 0.0 in AdditiveMarginSoftmax, got {m2}"
        super().__init__()
        self.fc = LogitsFullyConnected(num_class, feature_dim)
        self.num_class = num_class
        self.scale = scale
        self.margin = m3

    def forward(self, embedding, target):
        origin_logits = self.fc(embedding)
        # subtract the margin only at the target class, then rescale
        margin_at_target = F.one_hot(target, self.num_class) * self.margin
        logits = (origin_logits - margin_at_target) * self.scale
        loss = F.loss.cross_entropy(logits, target)
        # accuracy is measured on the un-margined logits
        accuracy = F.topk_accuracy(origin_logits, target, topk=1)
        return loss, accuracy
class AdditiveAngularMarginSoftmax(M.Module):
    """Additive angular margin softmax (ArcFace) loss head, from
    `"ArcFace: Additive Angular Margin Loss for Deep Face Recognition" <https://arxiv.org/pdf/1801.07698.pdf>`_
    """

    def __init__(self, num_class, scale, m1, m2, m3, feature_dim=512):
        # ArcFace only uses the additive angular margin m2; the other two
        # margin parameters must stay at their neutral values.
        assert m1 == 1.0, f"m1 expected to be 1.0 in AdditiveAngularMarginSoftmax, got {m1}"
        assert m3 == 0.0, f"m3 expected to be 0.0 in AdditiveAngularMarginSoftmax, got {m3}"
        super().__init__()
        self.fc = LogitsFullyConnected(num_class, feature_dim)
        self.num_class = num_class
        self.scale = scale
        self.margin = m2

    def forward(self, embedding, target):
        origin_logits = self.fc(embedding)
        target_mask = F.one_hot(target, self.num_class).astype("bool")
        # Add the angular margin in arccos space for non-negative cosines;
        # keep the raw logit for negative ones.
        margined_logit = F.where(
            origin_logits >= 0,
            F.cos(F.acos(origin_logits) + self.margin),
            origin_logits,
        )
        # The margin is applied only at the ground-truth class positions.
        logits = F.where(target_mask, margined_logit, origin_logits) * self.scale
        loss = F.loss.cross_entropy(logits, target)
        # Accuracy is measured on the raw (un-margined) logits.
        accuracy = F.topk_accuracy(origin_logits, target, topk=1)
        return loss, accuracy
def get_loss(name):
    """Look up a loss-head class by its registered name.

    Args:
        name (str): custom name of the loss ("cosface" or "arcface").

    Returns:
        M.Module: the corresponding loss class (not an instance).
    """
    registry = {
        "cosface": AdditiveMarginSoftmax,
        "arcface": AdditiveAngularMarginSoftmax,
    }
    assert name in registry, f"head {name} is not found, choose one from {registry.keys()}"
    return registry[name]
|
[
"megengine.module.Linear",
"megengine.functional.normalize",
"megengine.functional.topk_accuracy",
"megengine.functional.where",
"megengine.functional.acos",
"megengine.functional.one_hot",
"megengine.functional.loss.cross_entropy"
] |
[((350, 394), 'megengine.module.Linear', 'M.Linear', (['feature_dim', 'num_class'], {'bias': '(False)'}), '(feature_dim, num_class, bias=False)\n', (358, 394), True, 'import megengine.module as M\n'), ((532, 564), 'megengine.functional.normalize', 'F.normalize', (['self.weight'], {'axis': '(1)'}), '(self.weight, axis=1)\n', (543, 564), True, 'import megengine.functional as F\n'), ((1527, 1560), 'megengine.functional.one_hot', 'F.one_hot', (['target', 'self.num_class'], {}), '(target, self.num_class)\n', (1536, 1560), True, 'import megengine.functional as F\n'), ((1793, 1829), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'target'], {}), '(logits, target)\n', (1813, 1829), True, 'import megengine.functional as F\n'), ((1849, 1895), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['origin_logits', 'target'], {'topk': '(1)'}), '(origin_logits, target, topk=1)\n', (1864, 1895), True, 'import megengine.functional as F\n'), ((2878, 2949), 'megengine.functional.where', 'F.where', (['(origin_logits >= 0)', 'large_margined_logit', 'small_margined_logit'], {}), '(origin_logits >= 0, large_margined_logit, small_margined_logit)\n', (2885, 2949), True, 'import megengine.functional as F\n'), ((2967, 3021), 'megengine.functional.where', 'F.where', (['one_hot_target', 'margined_logit', 'origin_logits'], {}), '(one_hot_target, margined_logit, origin_logits)\n', (2974, 3021), True, 'import megengine.functional as F\n'), ((3074, 3110), 'megengine.functional.loss.cross_entropy', 'F.loss.cross_entropy', (['logits', 'target'], {}), '(logits, target)\n', (3094, 3110), True, 'import megengine.functional as F\n'), ((3130, 3176), 'megengine.functional.topk_accuracy', 'F.topk_accuracy', (['origin_logits', 'target'], {'topk': '(1)'}), '(origin_logits, target, topk=1)\n', (3145, 3176), True, 'import megengine.functional as F\n'), ((471, 483), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (480, 483), False, 'import math\n'), ((2685, 2718), 
'megengine.functional.one_hot', 'F.one_hot', (['target', 'self.num_class'], {}), '(target, self.num_class)\n', (2694, 2718), True, 'import megengine.functional as F\n'), ((2771, 2792), 'megengine.functional.acos', 'F.acos', (['origin_logits'], {}), '(origin_logits)\n', (2777, 2792), True, 'import megengine.functional as F\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward
class TQT_numpy:
    """NumPy reference implementation of TQT (trained quantization
    thresholds) fake quantization, used as ground truth in tests.

    forward() quantizes with a power-of-two step ``2 ** scale``;
    backward() implements the straight-through-style gradients for both
    the input and the (log2) scale.
    """

    def __init__(self, lowerbound, upperbound):
        super().__init__()
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        step = 2 ** scale
        # step = F.maximum(step, 1e-4)
        scaled = inp / step
        clipped = np.clip(scaled, self.lowerbound, self.upperbound)
        rounded = np.round(clipped)
        # Stash intermediates for the backward pass.
        self.saved_tensors = (scaled, rounded, step)
        return rounded * step

    def backward(self, grad_inp_flq):
        (scaled, rounded, step) = self.saved_tensors
        # Elements pushed outside [lb - 0.5, ub + 0.5] were clipped.
        mask_clip = (scaled < -0.5 + self.lowerbound) + (
            scaled > self.upperbound + 0.5
        )
        # Complement mask: elements inside the representable range.
        mask_quant = np.abs(mask_clip - 1)
        # Inside the range the scale gradient comes from the rounding
        # residual; for clipped elements only the rounded value matters.
        grad_quant = grad_inp_flq * mask_quant * (rounded - scaled)
        grad_clip = grad_inp_flq * mask_clip * rounded
        # dL/ds = dL/dt * t * ln(2), since t = 2 ** s.
        grad_s = (grad_clip.sum() + grad_quant.sum()) * step * np.log(2)
        # Straight-through estimate for the input gradient.
        grad_inp = grad_inp_flq * mask_quant
        return grad_inp, grad_s
def test_tqt():
    """Check tqt_forward outputs and gradients against the NumPy reference."""
    captured = []

    def record(grad):
        captured.append(grad)

    inp = np.random.normal(size=(1, 2, 3, 4))
    scale = np.random.rand(1) + 1
    out_grad = np.ones(shape=(1, 2, 3, 4), dtype="float32")

    # Ground truth from the NumPy reference implementation.
    reference = TQT_numpy(-127, 127)
    expected_y = reference.forward(inp, scale)
    expected_gx, expected_gs = reference.backward(out_grad)

    inp = mge.tensor(inp, dtype="float32")
    scale = mge.tensor(scale, dtype="float32")
    out_grad = mge.tensor(out_grad, dtype="float32")
    grad = Grad().wrt(inp, scale, callback=record)
    y = tqt_forward(-127, 127, inp, scale)
    grad(y, out_grad)
    actual_gx, actual_gs = captured

    np.testing.assert_allclose(y.numpy(), expected_y, atol=1e-6)
    np.testing.assert_allclose(actual_gx.numpy(), expected_gx, atol=1e-6)
    np.testing.assert_allclose(actual_gs.numpy(), expected_gs, atol=1e-6)
def _save_to(self, name="grad"):
def callback(grad):
setattr(self, name, grad)
return callback
class Round(Function):
    """Rounding op with a straight-through gradient.

    The forward pass rounds element-wise; the backward pass passes the
    output gradients through unchanged (rounding has zero gradient
    almost everywhere, so this is the usual straight-through estimator).
    """
    def forward(self, x):
        return F.round(x)
    def backward(self, output_grads):
        return output_grads
def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    """Ground-truth fake quantization used to validate fake_quant_tensor.

    Quantizes ``inp`` with straight-through rounding, clamps the integer
    value to [qmin, qmax], then dequantizes back to floating point.
    """
    quantized = Round()(inp / scale) + zero_point
    clamped = F.minimum(F.maximum(quantized, qmin), qmax)
    return (clamped - zero_point) * scale
def test_fakequant():
    """Compare fake_quant_tensor against the reference implementation
    ``fake_quant_tensor_gt`` in both forward outputs and input gradients,
    for scalar and per-channel zero_point/scale."""
    qmin = -126
    qmax = 129
    def run(zero_point, scale):
        # NOTE(review): "ASYMMERTIC" matches the (misspelled) member name
        # imported from megengine's QuantMode enum.
        q_dict = {}
        q_dict["mode"] = QuantMode.ASYMMERTIC
        q_dict["scale"] = scale
        q_dict["zero_point"] = zero_point
        inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
        inp = tensor(inp_data, dtype=np.float32)
        # test forward
        oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
        oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
        assert np.allclose(oup, oup_gt)
        assert oup.shape == oup_gt.shape
        # test backward
        x = tensor(inp_data, dtype=np.float32)
        grad = Grad().wrt(x, callback=_save_to(x))
        y = fake_quant_tensor(x, qmin, qmax, q_dict)
        grad(y, tensor(F.ones_like(x)))
        # Same input through the reference implementation; its gradient is
        # stored on x1 by the _save_to callback.
        x1 = tensor(inp_data, dtype=np.float32)
        grad = Grad().wrt(x1, callback=_save_to(x1))
        y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
        grad(y1, tensor(F.ones_like(x1)))
        # Gradients of the two implementations must agree element-wise.
        assert np.allclose(x.grad.numpy(), x1.grad.numpy())
        assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)
    # Scalar (per-tensor) quantization parameters.
    zero_point = tensor([1.0], dtype=np.float32)
    scale = tensor([4.0], dtype=np.float32)
    run(zero_point, scale)
    # Per-channel parameters, broadcast over the channel axis.
    zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
    scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
    run(zero_point, scale)
|
[
"megengine.tensor",
"megengine.quantization.utils.tqt_forward",
"megengine.quantization.utils.fake_quant_tensor",
"megengine.core.tensor.utils.make_shape_tuple",
"megengine.core.autodiff.grad.Grad"
] |
[((2219, 2254), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, 2, 3, 4)'}), '(size=(1, 2, 3, 4))\n', (2235, 2254), True, 'import numpy as np\n'), ((2295, 2339), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 2, 3, 4)', 'dtype': '"""float32"""'}), "(shape=(1, 2, 3, 4), dtype='float32')\n", (2302, 2339), True, 'import numpy as np\n'), ((2443, 2473), 'megengine.tensor', 'mge.tensor', (['x'], {'dtype': '"""float32"""'}), "(x, dtype='float32')\n", (2453, 2473), True, 'import megengine as mge\n'), ((2482, 2512), 'megengine.tensor', 'mge.tensor', (['s'], {'dtype': '"""float32"""'}), "(s, dtype='float32')\n", (2492, 2512), True, 'import megengine as mge\n'), ((2523, 2555), 'megengine.tensor', 'mge.tensor', (['g_y'], {'dtype': '"""float32"""'}), "(g_y, dtype='float32')\n", (2533, 2555), True, 'import megengine as mge\n'), ((2605, 2633), 'megengine.quantization.utils.tqt_forward', 'tqt_forward', (['(-127)', '(127)', 'x', 's'], {}), '(-127, 127, x, s)\n', (2616, 2633), False, 'from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward\n'), ((4522, 4553), 'megengine.tensor', 'tensor', (['[1.0]'], {'dtype': 'np.float32'}), '([1.0], dtype=np.float32)\n', (4528, 4553), False, 'from megengine import tensor\n'), ((4566, 4597), 'megengine.tensor', 'tensor', (['[4.0]'], {'dtype': 'np.float32'}), '([4.0], dtype=np.float32)\n', (4572, 4597), False, 'from megengine import tensor\n'), ((1141, 1162), 'numpy.round', 'np.round', (['inp_clipped'], {}), '(inp_clipped)\n', (1149, 1162), True, 'import numpy as np\n'), ((1573, 1594), 'numpy.abs', 'np.abs', (['(mask_clip - 1)'], {}), '(mask_clip - 1)\n', (1579, 1594), True, 'import numpy as np\n'), ((2263, 2280), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2277, 2280), True, 'import numpy as np\n'), ((3569, 3632), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-512.0)', 'high': '(512.0)', 'size': '(1, 32, 32, 32)'}), '(low=-512.0, high=512.0, size=(1, 32, 32, 32))\n', (3586, 
3632), True, 'import numpy as np\n'), ((3647, 3681), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (3653, 3681), False, 'from megengine import tensor\n'), ((3867, 3891), 'numpy.allclose', 'np.allclose', (['oup', 'oup_gt'], {}), '(oup, oup_gt)\n', (3878, 3891), True, 'import numpy as np\n'), ((3970, 4004), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (3976, 4004), False, 'from megengine import tensor\n'), ((4068, 4108), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['x', 'qmin', 'qmax', 'q_dict'], {}), '(x, qmin, qmax, q_dict)\n', (4085, 4108), False, 'from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward\n'), ((4163, 4197), 'megengine.tensor', 'tensor', (['inp_data'], {'dtype': 'np.float32'}), '(inp_data, dtype=np.float32)\n', (4169, 4197), False, 'from megengine import tensor\n'), ((1052, 1091), 'numpy.minimum', 'np.minimum', (['inp_scaled', 'self.upperbound'], {}), '(inp_scaled, self.upperbound)\n', (1062, 1091), True, 'import numpy as np\n'), ((2051, 2060), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2057, 2060), True, 'import numpy as np\n'), ((2567, 2573), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (2571, 2573), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((4438, 4468), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x.grad.shape'], {}), '(x.grad.shape)\n', (4454, 4468), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((4472, 4503), 'megengine.core.tensor.utils.make_shape_tuple', 'make_shape_tuple', (['x1.grad.shape'], {}), '(x1.grad.shape)\n', (4488, 4503), False, 'from megengine.core.tensor.utils import make_shape_tuple\n'), ((4656, 4678), 'numpy.ones', 'np.ones', (['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (4663, 4678), True, 'import numpy as np\n'), ((4723, 4745), 'numpy.ones', 'np.ones', 
(['(1, 32, 1, 1)'], {}), '((1, 32, 1, 1))\n', (4730, 4745), True, 'import numpy as np\n'), ((3719, 3761), 'megengine.quantization.utils.fake_quant_tensor', 'fake_quant_tensor', (['inp', 'qmin', 'qmax', 'q_dict'], {}), '(inp, qmin, qmax, q_dict)\n', (3736, 3761), False, 'from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward\n'), ((4020, 4026), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4024, 4026), False, 'from megengine.core.autodiff.grad import Function, Grad\n'), ((4213, 4219), 'megengine.core.autodiff.grad.Grad', 'Grad', ([], {}), '()\n', (4217, 4219), False, 'from megengine.core.autodiff.grad import Function, Grad\n')]
|
import megengine.module as M
import megengine.functional as F
from megengine import amp
from .update import BasicUpdateBlock
from .extractor import BasicEncoder
from .corr import AGCL
from .attention import PositionEncodingSine, LocalFeatureTransformer
class CREStereo(M.Module):
    """Cascaded recurrent network for stereo matching.

    A shared encoder extracts 1/4-resolution features; a flow field is
    then refined coarse-to-fine over the 1/16, 1/8 and 1/4 scales with a
    recurrent update block and adaptive group correlation (AGCL), using
    LoFTR-style self/cross attention at the coarsest scale.
    """
    def __init__(self, max_disp=192, mixed_precision=False, test_mode=False):
        """
        Args:
            max_disp (int): stored as ``self.max_flow`` (not read in this class).
            mixed_precision (bool): enable ``amp.autocast`` regions.
            test_mode (bool): if True, ``forward`` returns only the final
                prediction instead of the full list.
        """
        super(CREStereo, self).__init__()
        self.max_flow = max_disp
        self.mixed_precision = mixed_precision
        self.test_mode = test_mode
        self.hidden_dim = 128
        self.context_dim = 128
        self.dropout = 0
        # feature network and update block
        self.fnet = BasicEncoder(
            output_dim=256, norm_fn="instance", dropout=self.dropout
        )
        # The update block is shared across all three refinement scales.
        self.update_block = BasicUpdateBlock(
            hidden_dim=self.hidden_dim, cor_planes=4 * 9, mask_size=4
        )
        # loftr
        self.self_att_fn = LocalFeatureTransformer(
            d_model=256, nhead=8, layer_names=["self"] * 1, attention="linear"
        )
        self.cross_att_fn = LocalFeatureTransformer(
            d_model=256, nhead=8, layer_names=["cross"] * 1, attention="linear"
        )
        # adaptive search: predict 2D offsets for self.search_num sampling
        # positions at the 1/8 and 1/16 scales.
        self.search_num = 9
        self.conv_offset_16 = M.Conv2d(
            256, self.search_num * 2, kernel_size=3, stride=1, padding=1
        )
        self.conv_offset_8 = M.Conv2d(
            256, self.search_num * 2, kernel_size=3, stride=1, padding=1
        )
        # Offset magnitudes are scaled into [-range, range] in forward().
        self.range_16 = 1
        self.range_8 = 1
    def freeze_bn(self):
        """Put every BatchNorm2d submodule into eval mode (freeze stats)."""
        for m in self.modules():
            if isinstance(m, M.BatchNorm2d):
                m.eval()
    def unfold(self, x, kernel_size, dilation=1, padding=0, stride=1):
        """im2col: extract sliding k1 x k2 patches from ``x`` (N, C, H, W)
        and return them flattened as (N, C * k1 * k2, num_patches)."""
        n, c, h, w = x.shape
        if isinstance(kernel_size, tuple) or isinstance(kernel_size, list):
            assert len(kernel_size) == 2
            k1, k2 = kernel_size
        else:
            assert isinstance(kernel_size, int)
            k1 = k2 = kernel_size
        x = F.sliding_window(
            x,
            kernel_size=kernel_size,
            dilation=dilation,
            padding=padding,
            stride=stride,
        )
        # Collapse the spatial patch grid, move the k1*k2 window axis next
        # to the channel axis, then merge channels and window elements.
        x = F.reshape(x, (n, c, -1, k1 * k2))
        x = F.transpose(x, (0, 1, 3, 2))
        x = F.reshape(x, (n, c * k1 * k2, -1))
        return x
    def convex_upsample(self, flow, mask, rate=4):
        """Upsample flow (N, 2, H, W) -> (N, 2, rate*H, rate*W) as a learned
        convex combination (softmax over 9 weights) of each 3x3 neighborhood.
        Flow values are multiplied by ``rate`` to match the new resolution."""
        N, _, H, W = flow.shape
        # mask -> per-output-pixel weights over the 9 neighbors.
        mask = F.reshape(mask, (N, 1, 9, rate, rate, H, W))
        mask = F.softmax(mask, axis=2)
        up_flow = self.unfold(rate * flow, [3, 3], padding=1)
        up_flow = F.reshape(up_flow, (N, 2, 9, 1, 1, H, W))
        up_flow = F.sum(mask * up_flow, axis=2)
        # Interleave the rate x rate sub-pixel grid into the spatial dims.
        up_flow = F.transpose(up_flow, (0, 1, 4, 2, 5, 3))
        return F.reshape(up_flow, (N, 2, rate * H, rate * W))
    def zero_init(self, fmap):
        """Return an all-zero flow field (N, 2, H, W) matching ``fmap``'s
        batch/spatial shape, on the same device."""
        N, C, H, W = fmap.shape
        _x = F.zeros([N, 1, H, W], dtype="float32")
        _y = F.zeros([N, 1, H, W], dtype="float32")
        zero_flow = F.concat([_x, _y], axis=1).to(fmap.device)
        return zero_flow
    def forward(self, image1, image2, iters=10, flow_init=None):
        """Run cascaded recurrent refinement on an image pair.

        Args:
            image1, image2: input images, assumed to have values in
                [0, 255]; they are normalized to [-1, 1] here.
            iters (int): update iterations at the finest (1/4) scale; the
                1/16 and 1/8 scales each run ``iters // 2`` iterations.
            flow_init: optional initial flow; when given, the coarse
                (1/16 and 1/8) stages are skipped entirely.

        Returns:
            The final upsampled flow if ``self.test_mode``, otherwise the
            list of all intermediate upsampled predictions.
        """
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0
        hdim = self.hidden_dim
        cdim = self.context_dim  # NOTE(review): cdim is unused below.
        # feature network
        with amp.autocast(enabled=self.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])
        fmap1 = fmap1.astype("float32")
        fmap2 = fmap2.astype("float32")
        with amp.autocast(enabled=self.mixed_precision):
            # 1/4 -> 1/8
            # feature
            fmap1_dw8 = F.avg_pool2d(fmap1, 2, stride=2)
            fmap2_dw8 = F.avg_pool2d(fmap2, 2, stride=2)
            # offset
            offset_dw8 = self.conv_offset_8(fmap1_dw8)
            # Squash predicted offsets into [-range_8, range_8].
            offset_dw8 = self.range_8 * (F.sigmoid(offset_dw8) - 0.5) * 2.0
            # context
            net, inp = F.split(fmap1, [hdim], axis=1)
            net = F.tanh(net)
            inp = F.relu(inp)
            net_dw8 = F.avg_pool2d(net, 2, stride=2)
            inp_dw8 = F.avg_pool2d(inp, 2, stride=2)
            # 1/4 -> 1/16
            # feature
            fmap1_dw16 = F.avg_pool2d(fmap1, 4, stride=4)
            fmap2_dw16 = F.avg_pool2d(fmap2, 4, stride=4)
            offset_dw16 = self.conv_offset_16(fmap1_dw16)
            offset_dw16 = self.range_16 * (F.sigmoid(offset_dw16) - 0.5) * 2.0
            # context
            net_dw16 = F.avg_pool2d(net, 4, stride=4)
            inp_dw16 = F.avg_pool2d(inp, 4, stride=4)
        # positional encoding and self-attention
        pos_encoding_fn_small = PositionEncodingSine(
            d_model=256, max_shape=(image1.shape[2] // 16, image1.shape[3] // 16)
        )
        # 'n c h w -> n (h w) c'
        x_tmp = pos_encoding_fn_small(fmap1_dw16)
        fmap1_dw16 = F.reshape(
            F.transpose(x_tmp, (0, 2, 3, 1)),
            (x_tmp.shape[0], x_tmp.shape[2] * x_tmp.shape[3], x_tmp.shape[1]),
        )
        # 'n c h w -> n (h w) c'
        x_tmp = pos_encoding_fn_small(fmap2_dw16)
        fmap2_dw16 = F.reshape(
            F.transpose(x_tmp, (0, 2, 3, 1)),
            (x_tmp.shape[0], x_tmp.shape[2] * x_tmp.shape[3], x_tmp.shape[1]),
        )
        fmap1_dw16, fmap2_dw16 = self.self_att_fn(fmap1_dw16, fmap2_dw16)
        # Back to 'n c h w' layout for correlation.
        fmap1_dw16, fmap2_dw16 = [
            F.transpose(
                F.reshape(x, (x.shape[0], image1.shape[2] // 16, -1, x.shape[2])),
                (0, 3, 1, 2),
            )
            for x in [fmap1_dw16, fmap2_dw16]
        ]
        # Correlation volumes per scale; the 1/16 one uses cross attention.
        corr_fn = AGCL(fmap1, fmap2)
        corr_fn_dw8 = AGCL(fmap1_dw8, fmap2_dw8)
        corr_fn_att_dw16 = AGCL(fmap1_dw16, fmap2_dw16, att=self.cross_att_fn)
        # Cascaded refinement (1/16 + 1/8 + 1/4)
        predictions = []
        flow = None
        flow_up = None
        if flow_init is not None:
            # Warm start: rescale the given flow to the 1/4 feature grid
            # and skip the two coarse stages.
            scale = fmap1.shape[2] / flow_init.shape[2]
            flow = -scale * F.nn.interpolate(
                flow_init,
                size=(fmap1.shape[2], fmap1.shape[3]),
                mode="bilinear",
                align_corners=True,
            )
        else:
            # zero initialization
            flow_dw16 = self.zero_init(fmap1_dw16)
            # Recurrent Update Module
            # RUM: 1/16 — alternate small/large correlation patches per iter.
            for itr in range(iters // 2):
                if itr % 2 == 0:
                    small_patch = False
                else:
                    small_patch = True
                # Detach so each iteration's gradient stays local.
                flow_dw16 = flow_dw16.detach()
                out_corrs = corr_fn_att_dw16(
                    flow_dw16, offset_dw16, small_patch=small_patch
                )
                with amp.autocast(enabled=self.mixed_precision):
                    net_dw16, up_mask, delta_flow = self.update_block(
                        net_dw16, inp_dw16, out_corrs, flow_dw16
                    )
                    flow_dw16 = flow_dw16 + delta_flow
                flow = self.convex_upsample(flow_dw16, up_mask, rate=4)
                flow_up = -4 * F.nn.interpolate(
                    flow,
                    size=(4 * flow.shape[2], 4 * flow.shape[3]),
                    mode="bilinear",
                    align_corners=True,
                )
                predictions.append(flow_up)
            # Propagate the coarse flow to the 1/8 grid.
            scale = fmap1_dw8.shape[2] / flow.shape[2]
            flow_dw8 = -scale * F.nn.interpolate(
                flow,
                size=(fmap1_dw8.shape[2], fmap1_dw8.shape[3]),
                mode="bilinear",
                align_corners=True,
            )
            # RUM: 1/8
            for itr in range(iters // 2):
                if itr % 2 == 0:
                    small_patch = False
                else:
                    small_patch = True
                flow_dw8 = flow_dw8.detach()
                out_corrs = corr_fn_dw8(flow_dw8, offset_dw8, small_patch=small_patch)
                with amp.autocast(enabled=self.mixed_precision):
                    net_dw8, up_mask, delta_flow = self.update_block(
                        net_dw8, inp_dw8, out_corrs, flow_dw8
                    )
                    flow_dw8 = flow_dw8 + delta_flow
                flow = self.convex_upsample(flow_dw8, up_mask, rate=4)
                flow_up = -2 * F.nn.interpolate(
                    flow,
                    size=(2 * flow.shape[2], 2 * flow.shape[3]),
                    mode="bilinear",
                    align_corners=True,
                )
                predictions.append(flow_up)
            # Propagate to the 1/4 grid for the final stage.
            scale = fmap1.shape[2] / flow.shape[2]
            flow = -scale * F.nn.interpolate(
                flow,
                size=(fmap1.shape[2], fmap1.shape[3]),
                mode="bilinear",
                align_corners=True,
            )
        # RUM: 1/4 — full number of iterations at the finest scale.
        for itr in range(iters):
            if itr % 2 == 0:
                small_patch = False
            else:
                small_patch = True
            flow = flow.detach()
            out_corrs = corr_fn(flow, None, small_patch=small_patch, iter_mode=True)
            with amp.autocast(enabled=self.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, out_corrs, flow)
                flow = flow + delta_flow
            flow_up = -self.convex_upsample(flow, up_mask, rate=4)
            predictions.append(flow_up)
        if self.test_mode:
            return flow_up
        return predictions
|
[
"megengine.functional.split",
"megengine.functional.sigmoid",
"megengine.functional.nn.interpolate",
"megengine.functional.softmax",
"megengine.functional.transpose",
"megengine.amp.autocast",
"megengine.functional.avg_pool2d",
"megengine.functional.relu",
"megengine.functional.sum",
"megengine.functional.zeros",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.functional.reshape",
"megengine.functional.sliding_window",
"megengine.functional.tanh"
] |
[((1276, 1346), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(self.search_num * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, self.search_num * 2, kernel_size=3, stride=1, padding=1)\n', (1284, 1346), True, 'import megengine.module as M\n'), ((1398, 1468), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', '(self.search_num * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, self.search_num * 2, kernel_size=3, stride=1, padding=1)\n', (1406, 1468), True, 'import megengine.module as M\n'), ((2030, 2130), 'megengine.functional.sliding_window', 'F.sliding_window', (['x'], {'kernel_size': 'kernel_size', 'dilation': 'dilation', 'padding': 'padding', 'stride': 'stride'}), '(x, kernel_size=kernel_size, dilation=dilation, padding=\n padding, stride=stride)\n', (2046, 2130), True, 'import megengine.functional as F\n'), ((2209, 2242), 'megengine.functional.reshape', 'F.reshape', (['x', '(n, c, -1, k1 * k2)'], {}), '(x, (n, c, -1, k1 * k2))\n', (2218, 2242), True, 'import megengine.functional as F\n'), ((2255, 2283), 'megengine.functional.transpose', 'F.transpose', (['x', '(0, 1, 3, 2)'], {}), '(x, (0, 1, 3, 2))\n', (2266, 2283), True, 'import megengine.functional as F\n'), ((2296, 2330), 'megengine.functional.reshape', 'F.reshape', (['x', '(n, c * k1 * k2, -1)'], {}), '(x, (n, c * k1 * k2, -1))\n', (2305, 2330), True, 'import megengine.functional as F\n'), ((2494, 2538), 'megengine.functional.reshape', 'F.reshape', (['mask', '(N, 1, 9, rate, rate, H, W)'], {}), '(mask, (N, 1, 9, rate, rate, H, W))\n', (2503, 2538), True, 'import megengine.functional as F\n'), ((2554, 2577), 'megengine.functional.softmax', 'F.softmax', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (2563, 2577), True, 'import megengine.functional as F\n'), ((2659, 2700), 'megengine.functional.reshape', 'F.reshape', (['up_flow', '(N, 2, 9, 1, 1, H, W)'], {}), '(up_flow, (N, 2, 9, 1, 1, H, W))\n', (2668, 2700), True, 'import megengine.functional as F\n'), 
((2720, 2749), 'megengine.functional.sum', 'F.sum', (['(mask * up_flow)'], {'axis': '(2)'}), '(mask * up_flow, axis=2)\n', (2725, 2749), True, 'import megengine.functional as F\n'), ((2768, 2808), 'megengine.functional.transpose', 'F.transpose', (['up_flow', '(0, 1, 4, 2, 5, 3)'], {}), '(up_flow, (0, 1, 4, 2, 5, 3))\n', (2779, 2808), True, 'import megengine.functional as F\n'), ((2824, 2870), 'megengine.functional.reshape', 'F.reshape', (['up_flow', '(N, 2, rate * H, rate * W)'], {}), '(up_flow, (N, 2, rate * H, rate * W))\n', (2833, 2870), True, 'import megengine.functional as F\n'), ((2948, 2986), 'megengine.functional.zeros', 'F.zeros', (['[N, 1, H, W]'], {'dtype': '"""float32"""'}), "([N, 1, H, W], dtype='float32')\n", (2955, 2986), True, 'import megengine.functional as F\n'), ((3000, 3038), 'megengine.functional.zeros', 'F.zeros', (['[N, 1, H, W]'], {'dtype': '"""float32"""'}), "([N, 1, H, W], dtype='float32')\n", (3007, 3038), True, 'import megengine.functional as F\n'), ((3386, 3428), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (3398, 3428), False, 'from megengine import amp\n'), ((3580, 3622), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (3592, 3622), False, 'from megengine import amp\n'), ((3696, 3728), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap1', '(2)'], {'stride': '(2)'}), '(fmap1, 2, stride=2)\n', (3708, 3728), True, 'import megengine.functional as F\n'), ((3753, 3785), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap2', '(2)'], {'stride': '(2)'}), '(fmap2, 2, stride=2)\n', (3765, 3785), True, 'import megengine.functional as F\n'), ((3985, 4015), 'megengine.functional.split', 'F.split', (['fmap1', '[hdim]'], {'axis': '(1)'}), '(fmap1, [hdim], axis=1)\n', (3992, 4015), True, 'import megengine.functional as F\n'), ((4034, 4045), 'megengine.functional.tanh', 'F.tanh', (['net'], 
{}), '(net)\n', (4040, 4045), True, 'import megengine.functional as F\n'), ((4064, 4075), 'megengine.functional.relu', 'F.relu', (['inp'], {}), '(inp)\n', (4070, 4075), True, 'import megengine.functional as F\n'), ((4098, 4128), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['net', '(2)'], {'stride': '(2)'}), '(net, 2, stride=2)\n', (4110, 4128), True, 'import megengine.functional as F\n'), ((4151, 4181), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['inp', '(2)'], {'stride': '(2)'}), '(inp, 2, stride=2)\n', (4163, 4181), True, 'import megengine.functional as F\n'), ((4256, 4288), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap1', '(4)'], {'stride': '(4)'}), '(fmap1, 4, stride=4)\n', (4268, 4288), True, 'import megengine.functional as F\n'), ((4314, 4346), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['fmap2', '(4)'], {'stride': '(4)'}), '(fmap2, 4, stride=4)\n', (4326, 4346), True, 'import megengine.functional as F\n'), ((4530, 4560), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['net', '(4)'], {'stride': '(4)'}), '(net, 4, stride=4)\n', (4542, 4560), True, 'import megengine.functional as F\n'), ((4584, 4614), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['inp', '(4)'], {'stride': '(4)'}), '(inp, 4, stride=4)\n', (4596, 4614), True, 'import megengine.functional as F\n'), ((3059, 3085), 'megengine.functional.concat', 'F.concat', (['[_x, _y]'], {'axis': '(1)'}), '([_x, _y], axis=1)\n', (3067, 3085), True, 'import megengine.functional as F\n'), ((4970, 5002), 'megengine.functional.transpose', 'F.transpose', (['x_tmp', '(0, 2, 3, 1)'], {}), '(x_tmp, (0, 2, 3, 1))\n', (4981, 5002), True, 'import megengine.functional as F\n'), ((5244, 5276), 'megengine.functional.transpose', 'F.transpose', (['x_tmp', '(0, 2, 3, 1)'], {}), '(x_tmp, (0, 2, 3, 1))\n', (5255, 5276), True, 'import megengine.functional as F\n'), ((6127, 6235), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow_init'], {'size': '(fmap1.shape[2], 
fmap1.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow_init, size=(fmap1.shape[2], fmap1.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (6143, 6235), True, 'import megengine.functional as F\n'), ((7543, 7654), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(fmap1_dw8.shape[2], fmap1_dw8.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(fmap1_dw8.shape[2], fmap1_dw8.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (7559, 7654), True, 'import megengine.functional as F\n'), ((8762, 8865), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(fmap1.shape[2], fmap1.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(fmap1.shape[2], fmap1.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (8778, 8865), True, 'import megengine.functional as F\n'), ((9248, 9290), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (9260, 9290), False, 'from megengine import amp\n'), ((5542, 5607), 'megengine.functional.reshape', 'F.reshape', (['x', '(x.shape[0], image1.shape[2] // 16, -1, x.shape[2])'], {}), '(x, (x.shape[0], image1.shape[2] // 16, -1, x.shape[2]))\n', (5551, 5607), True, 'import megengine.functional as F\n'), ((6850, 6892), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), '(enabled=self.mixed_precision)\n', (6862, 6892), False, 'from megengine import amp\n'), ((7207, 7316), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(4 * flow.shape[2], 4 * flow.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(4 * flow.shape[2], 4 * flow.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (7223, 7316), True, 'import megengine.functional as F\n'), ((8084, 8126), 'megengine.amp.autocast', 'amp.autocast', ([], {'enabled': 'self.mixed_precision'}), 
'(enabled=self.mixed_precision)\n', (8096, 8126), False, 'from megengine import amp\n'), ((8434, 8543), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['flow'], {'size': '(2 * flow.shape[2], 2 * flow.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow, size=(2 * flow.shape[2], 2 * flow.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (8450, 8543), True, 'import megengine.functional as F\n'), ((3904, 3925), 'megengine.functional.sigmoid', 'F.sigmoid', (['offset_dw8'], {}), '(offset_dw8)\n', (3913, 3925), True, 'import megengine.functional as F\n'), ((4448, 4470), 'megengine.functional.sigmoid', 'F.sigmoid', (['offset_dw16'], {}), '(offset_dw16)\n', (4457, 4470), True, 'import megengine.functional as F\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from typing import List, Union
import megengine as mge
from megengine.traced_module import TracedModule
from ..backend.ir_to_caffe.caffe_converter import BackEnd, CaffeConverter
from ..converter_ir.ir_quantizer import IRQuantizer
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.tm_to_ir import TM_FrontEnd
from ..frontend.tm_to_ir.tm_utils import _update_inputs_qparams
def tracedmodule_to_caffe(
    traced_module,
    prototxt="out.prototxt",
    caffemodel="out.caffemodel",
    outspec=None,
    use_empty_blobs=False,
    input_data_type: str = None,
    input_scales: Union[float, List[float]] = None,
    input_zero_points: Union[int, List[int]] = None,
    require_quantize=False,
    param_fake_quant=False,
    split_conv_relu=False,
    quantize_file_path="quant_params.json",
    convert_backend: BackEnd = BackEnd.CAFFE,
):
    """
    Convert TracedModule model to Caffe,
    and save caffe model to `prototxt` and `caffemodel`.

    :param traced_module: a TracedModule instance, or the file path of a
        serialized TracedModule model.
    :param prototxt: the filename used for saved model definition.
    :type prototxt: str
    :param caffemodel: the filename used for saved model weights.
    :type caffemodel: str
    :param outspec: specify the end points of the model, expect the full names of nodes.
    :type outspec: list
    :param use_empty_blobs: forwarded to :class:`CaffeConverter`.
    :param input_data_type: input quantization data type, forwarded to
        ``_update_inputs_qparams``.
    :param input_scales: input quantization scale(s), forwarded to
        ``_update_inputs_qparams``.
    :param input_zero_points: input quantization zero point(s), forwarded to
        ``_update_inputs_qparams``.
    :param require_quantize: if True, collect quantization parameters and dump
        them to ``quantize_file_path``.
    :param param_fake_quant: forwarded to :class:`IRQuantizer`.
    :param split_conv_relu: if True, additionally apply the
        ``TransformerRule.REMOVE_RELU`` transform.
    :param quantize_file_path: output path for the dumped quantization params.
    :param convert_backend: target backend flavour (default: ``BackEnd.CAFFE``).
    """
    # Fail fast: validate the output file names *before* doing any expensive
    # front-end resolution or conversion work. (Previously this was only
    # checked after the conversion had already run.)
    assert isinstance(prototxt, str) and isinstance(
        caffemodel, str
    ), "'prototxt' and 'caffemodel' must be string"
    if isinstance(traced_module, str):
        traced_module = mge.load(traced_module)
    assert isinstance(
        traced_module, TracedModule
    ), "Input should be a traced module or a path of traced module."
    _update_inputs_qparams(
        traced_module, input_data_type, input_scales, input_zero_points
    )
    irgraph = TM_FrontEnd(traced_module, outspec=outspec).resolve()
    transformer_options = [
        TransformerRule.REMOVE_DROPOUT,
        TransformerRule.REMOVE_RESHAPE_REALTED_OP,
        TransformerRule.REMOVE_UNRELATED_IROP,
        TransformerRule.ADD_FAKE_HSIGMOID_OUT,
        TransformerRule.EXPAND_CONVRELU,
    ]
    if split_conv_relu:
        transformer_options += [TransformerRule.REMOVE_RELU]
    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)
    quantizer = IRQuantizer(
        require_quantize=require_quantize, param_fake_quant=param_fake_quant
    )
    if require_quantize:
        quantizer.save_quantize_params(transformed_irgraph)
    converter = CaffeConverter(
        transformed_irgraph, quantizer, use_empty_blobs, convert_backend
    )
    converter.convert()
    if require_quantize:
        quantizer.dump_quant_param(path=quantize_file_path)
    converter.dump(prototxt, caffemodel)
|
[
"megengine.load"
] |
[((1858, 1881), 'megengine.load', 'mge.load', (['traced_module'], {}), '(traced_module)\n', (1866, 1881), True, 'import megengine as mge\n')]
|
# Copyright (c) 2020 <NAME>
# This code is licensed under MIT license
# (https://github.com/kwotsin/mimicry/blob/master/LICENSE)
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import math
import megengine.functional as F
import megengine.module as M
class GBlock(M.Module):
    r"""Generator residual block.

    Upsampling (when enabled) uses bilinear interpolation with
    ``align_corners=False``, matching how torchvision performs upsampling in:
    https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/_utils.py

    Attributes:
        in_channels (int): Channel size of the input feature map.
        out_channels (int): Channel size of the output feature map.
        hidden_channels (int): Channel size of intermediate feature maps;
            defaults to ``out_channels`` when not given.
        upsample (bool): If True, upsamples the input feature map by 2x.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 upsample=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if hidden_channels is None:
            hidden_channels = out_channels
        self.hidden_channels = hidden_channels
        # A learnable shortcut is required whenever the channel count or the
        # spatial size changes between input and output.
        self.learnable_sc = in_channels != out_channels or upsample
        self.upsample = upsample

        # Main branch: two 3x3 convolutions with batch norm + ReLU.
        self.c1 = M.Conv2d(self.in_channels, self.hidden_channels, 3, 1, padding=1)
        self.c2 = M.Conv2d(self.hidden_channels, self.out_channels, 3, 1, padding=1)
        self.b1 = M.BatchNorm2d(self.in_channels)
        self.b2 = M.BatchNorm2d(self.hidden_channels)
        self.activation = M.ReLU()
        M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))

        # Shortcut branch: 1x1 convolution, only when needed.
        if self.learnable_sc:
            self.c_sc = M.Conv2d(in_channels, out_channels, 1, 1, padding=0)
            M.init.xavier_uniform_(self.c_sc.weight, 1.0)

    def _upsample_conv(self, x, conv):
        r"""Upsample ``x`` by 2x (bilinear, align_corners=False), then apply ``conv``."""
        upsampled = F.interpolate(x,
                                  scale_factor=2,
                                  mode='bilinear',
                                  align_corners=False)
        return conv(upsampled)

    def _residual(self, x):
        r"""Feed ``x`` through the main (residual) branch."""
        h = self.b1(x)
        h = self.activation(h)
        if self.upsample:
            h = self._upsample_conv(h, self.c1)
        else:
            h = self.c1(h)
        h = self.b2(h)
        h = self.activation(h)
        return self.c2(h)

    def _shortcut(self, x):
        r"""Feed ``x`` through the shortcut branch (identity when possible)."""
        if not self.learnable_sc:
            return x
        if self.upsample:
            return self._upsample_conv(x, self.c_sc)
        return self.c_sc(x)

    def forward(self, x):
        r"""Residual block feedforward: main branch plus shortcut."""
        return self._residual(x) + self._shortcut(x)
class DBlock(M.Module):
    """Discriminator residual block.

    Attributes:
        in_channels (int): Channel size of the input feature map.
        out_channels (int): Channel size of the output feature map.
        hidden_channels (int): Channel size of intermediate feature maps;
            defaults to ``in_channels`` when not given.
        downsample (bool): If True, downsamples the feature map by 2x.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=None,
                 downsample=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = in_channels if hidden_channels is None else hidden_channels
        self.downsample = downsample
        # A learnable shortcut is required whenever the channel count or the
        # spatial size changes between input and output.
        self.learnable_sc = (in_channels != out_channels) or downsample

        # Main branch: two 3x3 convolutions with ReLU activations.
        self.c1 = M.Conv2d(self.in_channels, self.hidden_channels, 3, 1,
                           1)
        self.c2 = M.Conv2d(self.hidden_channels, self.out_channels, 3, 1,
                           1)
        self.activation = M.ReLU()
        M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))

        # Shortcut branch: 1x1 convolution, only when needed.
        if self.learnable_sc:
            self.c_sc = M.Conv2d(in_channels, out_channels, 1, 1, 0)
            M.init.xavier_uniform_(self.c_sc.weight, 1.0)

    def _residual(self, x):
        """Main branch: ReLU-conv-ReLU-conv, then optional 2x average pool."""
        out = self.activation(x)
        out = self.c1(out)
        out = self.activation(out)
        out = self.c2(out)
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        return out

    def _shortcut(self, x):
        """Shortcut branch; identity unless channels/size change."""
        if not self.learnable_sc:
            return x
        out = self.c_sc(x)
        return F.avg_pool2d(out, 2) if self.downsample else out

    def forward(self, x):
        """Residual block feedforward function.

        NOTE: to completely reproduce pytorch, we use F.relu(x) to replace x
        in the shortcut, since pytorch uses an inplace relu in the residual
        branch.
        """
        return self._residual(x) + self._shortcut(F.relu(x))
class DBlockOptimized(M.Module):
    """Optimized residual block for the discriminator.

    Used as the first residual block, where a downsampling step is always
    performed. Follows the official SNGAN reference implementation in chainer.

    Attributes:
        in_channels (int): Channel size of the input feature map.
        out_channels (int): Channel size of the output feature map.
        spectral_norm (bool): If True, uses spectral norm for convolutional layers.
    """

    def __init__(self, in_channels, out_channels, spectral_norm=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.spectral_norm = spectral_norm

        # Main branch (two 3x3 convs) and shortcut branch (one 1x1 conv).
        self.c1 = M.Conv2d(self.in_channels, self.out_channels, 3, 1, 1)
        self.c2 = M.Conv2d(self.out_channels, self.out_channels, 3, 1, 1)
        self.c_sc = M.Conv2d(self.in_channels, self.out_channels, 1, 1, 0)
        self.activation = M.ReLU()
        M.init.xavier_uniform_(self.c1.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c2.weight, math.sqrt(2.0))
        M.init.xavier_uniform_(self.c_sc.weight, 1.0)

    def _residual(self, x):
        """Main branch: conv-ReLU-conv followed by 2x average pooling."""
        out = self.c1(x)
        out = self.activation(out)
        out = self.c2(out)
        return F.avg_pool2d(out, 2)

    def _shortcut(self, x):
        """Shortcut branch: 2x average pooling followed by a 1x1 convolution."""
        return self.c_sc(F.avg_pool2d(x, 2))

    def forward(self, x):
        """Residual block feedforward: main branch plus shortcut."""
        return self._residual(x) + self._shortcut(x)
|
[
"megengine.module.ReLU",
"megengine.module.BatchNorm2d",
"megengine.functional.avg_pool2d",
"megengine.functional.relu",
"megengine.module.init.xavier_uniform_",
"megengine.functional.interpolate",
"megengine.module.Conv2d"
] |
[((2136, 2201), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.hidden_channels', '(3)', '(1)'], {'padding': '(1)'}), '(self.in_channels, self.hidden_channels, 3, 1, padding=1)\n', (2144, 2201), True, 'import megengine.module as M\n'), ((2328, 2394), 'megengine.module.Conv2d', 'M.Conv2d', (['self.hidden_channels', 'self.out_channels', '(3)', '(1)'], {'padding': '(1)'}), '(self.hidden_channels, self.out_channels, 3, 1, padding=1)\n', (2336, 2394), True, 'import megengine.module as M\n'), ((2522, 2553), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['self.in_channels'], {}), '(self.in_channels)\n', (2535, 2553), True, 'import megengine.module as M\n'), ((2572, 2607), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['self.hidden_channels'], {}), '(self.hidden_channels)\n', (2585, 2607), True, 'import megengine.module as M\n'), ((2635, 2643), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (2641, 2643), True, 'import megengine.module as M\n'), ((5209, 5266), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.hidden_channels', '(3)', '(1)', '(1)'], {}), '(self.in_channels, self.hidden_channels, 3, 1, 1)\n', (5217, 5266), True, 'import megengine.module as M\n'), ((5312, 5370), 'megengine.module.Conv2d', 'M.Conv2d', (['self.hidden_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.hidden_channels, self.out_channels, 3, 1, 1)\n', (5320, 5370), True, 'import megengine.module as M\n'), ((5425, 5433), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5431, 5433), True, 'import megengine.module as M\n'), ((7418, 7472), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.in_channels, self.out_channels, 3, 1, 1)\n', (7426, 7472), True, 'import megengine.module as M\n'), ((7491, 7546), 'megengine.module.Conv2d', 'M.Conv2d', (['self.out_channels', 'self.out_channels', '(3)', '(1)', '(1)'], {}), '(self.out_channels, self.out_channels, 3, 1, 1)\n', (7499, 
7546), True, 'import megengine.module as M\n'), ((7567, 7621), 'megengine.module.Conv2d', 'M.Conv2d', (['self.in_channels', 'self.out_channels', '(1)', '(1)', '(0)'], {}), '(self.in_channels, self.out_channels, 1, 1, 0)\n', (7575, 7621), True, 'import megengine.module as M\n'), ((7649, 7657), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (7655, 7657), True, 'import megengine.module as M\n'), ((7793, 7838), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (7815, 7838), True, 'import megengine.module as M\n'), ((8059, 8077), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (8071, 8077), True, 'import megengine.functional as F\n'), ((2692, 2706), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2701, 2706), False, 'import math\n'), ((2755, 2769), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (2764, 2769), False, 'import math\n'), ((2851, 2903), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)'], {'padding': '(0)'}), '(in_channels, out_channels, 1, 1, padding=0)\n', (2859, 2903), True, 'import megengine.module as M\n'), ((3048, 3093), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (3070, 3093), True, 'import megengine.module as M\n'), ((3261, 3331), 'megengine.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(2)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(x, scale_factor=2, mode='bilinear', align_corners=False)\n", (3274, 3331), True, 'import megengine.functional as F\n'), ((5482, 5496), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (5491, 5496), False, 'import math\n'), ((5545, 5559), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (5554, 5559), False, 'import math\n'), ((5641, 5685), 'megengine.module.Conv2d', 'M.Conv2d', (['in_channels', 'out_channels', '(1)', 
'(1)', '(0)'], {}), '(in_channels, out_channels, 1, 1, 0)\n', (5649, 5685), True, 'import megengine.module as M\n'), ((5698, 5743), 'megengine.module.init.xavier_uniform_', 'M.init.xavier_uniform_', (['self.c_sc.weight', '(1.0)'], {}), '(self.c_sc.weight, 1.0)\n', (5720, 5743), True, 'import megengine.module as M\n'), ((6027, 6045), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (6039, 6045), True, 'import megengine.functional as F\n'), ((7706, 7720), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (7715, 7720), False, 'import math\n'), ((7769, 7783), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (7778, 7783), False, 'import math\n'), ((8242, 8260), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (8254, 8260), True, 'import megengine.functional as F\n'), ((6263, 6281), 'megengine.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (6275, 6281), True, 'import megengine.functional as F\n'), ((6642, 6651), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6648, 6651), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python
# -*-coding=utf-8-*-
from megengine.logger import get_logger
logger = get_logger(__name__)
try:
from tensorboardX import SummaryWriter
from tensorboardX.proto.attr_value_pb2 import AttrValue
from tensorboardX.proto.graph_pb2 import GraphDef
from tensorboardX.proto.node_def_pb2 import NodeDef
from tensorboardX.proto.plugin_text_pb2 import TextPluginData
from tensorboardX.proto.step_stats_pb2 import (
DeviceStepStats,
RunMetadata,
StepStats,
)
from tensorboardX.proto.summary_pb2 import Summary, SummaryMetadata
from tensorboardX.proto.tensor_pb2 import TensorProto
from tensorboardX.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboardX.proto.versions_pb2 import VersionDef
except ImportError:
logger.error(
"TensorBoard and TensorboardX are required for visualize.", exc_info=True,
)
def tensor_shape_proto(shape):
    """Creates an object matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/tensor_shape.proto

    Args:
        shape: iterable of dimension sizes.

    Returns:
        TensorShapeProto: proto with one ``Dim`` per entry of ``shape``.
    """
    dims = [TensorShapeProto.Dim(size=size) for size in shape]
    return TensorShapeProto(dim=dims)
def attr_value_proto(shape, dtype, attr):
    """Creates a dict of objects matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/attr_value.proto
    specifically designed for a NodeDef. The values have been reverse
    engineered from standard TensorBoard logged data.

    Args:
        shape: output shape, or None to omit ``_output_shapes``.
        dtype (str or None): dtype string, or None to omit ``dtype``.
        attr (dict or None): extra string-valued attributes, or None.

    Returns:
        dict: attribute name -> AttrValue.
    """
    proto = {}
    if shape is not None:
        # Wrap the single shape in a one-element ListValue, as TensorBoard expects.
        proto["_output_shapes"] = AttrValue(
            list=AttrValue.ListValue(shape=[tensor_shape_proto(shape)])
        )
    if dtype is not None:
        proto["dtype"] = AttrValue(s=dtype.encode(encoding="utf-8"))
    if attr is not None:
        for key, value in attr.items():
            proto[key] = AttrValue(s=value.encode(encoding="utf-8"))
    return proto
def node_proto(
    name, op="UnSpecified", input=None, outputshape=None, dtype=None, attributes=None
):
    """Creates an object matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto

    Args:
        name (str): node name; encoded as UTF-8 in the proto.
        op (str): op type string.
        input: input node name(s); a string is normalized to a one-element list,
            None to an empty list.
        outputshape (list or None): output shape, forwarded to attr_value_proto.
        dtype (str or None): output dtype string.
        attributes (dict or None): extra string-valued attributes.

    Returns:
        NodeDef: the populated node proto.
    """
    # Fix: the default was a shared mutable dict (attributes={}); use None as
    # the sentinel and normalize here instead. Behavior for callers is unchanged.
    if attributes is None:
        attributes = {}
    if input is None:
        input = []
    if not isinstance(input, list):
        input = [input]
    return NodeDef(
        name=name.encode(encoding="utf_8"),
        op=op,
        input=input,
        attr=attr_value_proto(outputshape, dtype, attributes),
    )
def node(
    name, op="UnSpecified", input=None, outputshape=None, dtype=None, attributes=None
):
    """Thin wrapper around :func:`node_proto`; see it for argument docs."""
    # Fix: the default was a shared mutable dict (attributes={}); use None and
    # normalize before forwarding so node_proto sees the same value as before.
    if attributes is None:
        attributes = {}
    return node_proto(name, op, input, outputshape, dtype, attributes)
def graph(node_list):
    """Build a (GraphDef, RunMetadata) pair from a list of NodeDef protos.

    Args:
        node_list (list): NodeDef protos to embed in the graph.

    Returns:
        tuple: (GraphDef, RunMetadata) — the metadata carries a single dummy
        CPU device entry, which TensorBoard expects alongside a graph.
    """
    versions = VersionDef(producer=22)
    graph_def = GraphDef(node=node_list, versions=versions)
    cpu_stats = DeviceStepStats(device="/device:CPU:0")
    stepstats = RunMetadata(step_stats=StepStats(dev_stats=[cpu_stats]))
    return graph_def, stepstats
def text(tag, text):
    """Build a text Summary proto for the TensorBoard text plugin.

    Args:
        tag (str): data identifier.
        text (list of str): strings to store; each is UTF-8 encoded.

    Returns:
        Summary: a summary whose single value carries a DT_STRING tensor.
    """
    content = TextPluginData(version=0).SerializeToString()
    plugin_data = SummaryMetadata.PluginData(plugin_name="text", content=content)
    smd = SummaryMetadata(plugin_data=plugin_data)
    string_val = [item.encode(encoding="utf_8") for item in text]
    shape = TensorShapeProto(dim=[TensorShapeProto.Dim(size=len(text))])
    tensor = TensorProto(
        dtype="DT_STRING",
        string_val=string_val,
        tensor_shape=shape,
    )
    return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
class NodeRaw:
    """Plain container for one raw graph node record.

    Holds the fields later consumed by SummaryWriterExtend when building a
    TensorBoard graph: name, op, input names, output shape, dtype, attributes.
    """

    def __init__(self, name, op, input, outputshape, dtype, attributes):
        # All fields are stored verbatim; nothing is validated or copied here.
        fields = dict(
            name=name,
            op=op,
            input=input,
            outputshape=outputshape,
            dtype=dtype,
            attributes=attributes,
        )
        for key, value in fields.items():
            setattr(self, key, value)
class SummaryWriterExtend(SummaryWriter):
    """SummaryWriter subclass that additionally accumulates raw node records
    (name/op/input/shape/dtype/attributes) and can emit them as a TensorBoard
    graph, plus a list-based ``add_text``.
    """

    def __init__(
        self,
        logdir=None,
        comment="",
        purge_step=None,
        max_queue=10,
        flush_secs=120,
        filename_suffix="",
        write_to_disk=True,
        log_dir=None,
        **kwargs
    ):
        # name -> NodeRaw; filled by add_node_raw(), consumed by
        # add_graph_by_node_raw_list().
        self.node_raw_dict = {}
        super().__init__(
            logdir,
            comment,
            purge_step,
            max_queue,
            flush_secs,
            filename_suffix,
            write_to_disk,
            log_dir,
            **kwargs,
        )

    def add_text(self, tag, text_string_list, global_step=None, walltime=None):
        """Add text data to summary.

        Args:
            tag (string): Data identifier
            text_string_list (string list): Strings to save
            global_step (int): Global step value to record
            walltime (float): Optional override default walltime (time.time())
                seconds after epoch of event

        Examples::

            # text can be divided into three levels by tag and global_step
            from writer import SummaryWriterExtend
            writer = SummaryWriterExtend()
            writer.add_text('level1.0/level2.0', ['text0'], 0)
            writer.add_text('level1.0/level2.0', ['text1'], 1)
            writer.add_text('level1.0/level2.1', ['text2'])
            writer.add_text('level1.1', ['text3'])
        """
        self._get_file_writer().add_summary(
            text(tag, text_string_list), global_step, walltime
        )

    def add_node_raw(
        self,
        name,
        op="UnSpecified",
        input=[],
        outputshape=None,
        dtype=None,
        attributes={},
    ):
        """Add node raw datas that can help build graph. After adding all nodes,
        call add_graph_by_node_raw_list() to build the graph and add graph data
        to summary.

        NOTE(review): ``input=[]`` and ``attributes={}`` are mutable default
        arguments; they are not mutated here (``dict(attributes)`` copies), but
        ``None`` defaults would be safer.

        Args:
            name (string): opr name.
            op (string): opr class name.
            input (string list): input opr name.
            outputshape (list): output shape.
            dtype (string): output data dtype.
            attributes (dict): attributes info.

        Examples::

            from writer import SummaryWriterExtend
            writer = SummaryWriterExtend()
            writer.add_node_raw('node1', 'opr1', outputshape=[6, 2, 3], dtype="float32", attributes={
                "peak_size": "12MB", "mmory_alloc": "2MB, percent: 16.7%"})
            writer.add_node_raw('node2', 'opr2', outputshape=[6, 2, 3], dtype="float32", input="node1", attributes={
                "peak_size": "12MB", "mmory_alloc": "2MB, percent: 16.7%"})
            writer.add_graph_by_node_raw_list()
        """
        # self.node_raw_list.append(
        #     node(name, op, input, outputshape, dtype, attributes))
        self.node_raw_dict[name] = NodeRaw(
            name, op, input, outputshape, dtype, dict(attributes)
        )

    def add_node_raw_name_suffix(self, name, suffix):
        """Give the node a name suffix in order to find it via 'search nodes'.

        Args:
            name (string): opr name.
            suffix (string): name suffix.
        """
        old_name = self.node_raw_dict[name].name
        new_name = old_name + suffix
        # NOTE(review): the dict key deliberately stays the original ``name``;
        # only the stored NodeRaw's ``name`` field is renamed (the commented
        # line below would have re-keyed the dict instead).
        # self.node_raw_dict[new_name] = self.node_raw_dict.pop(name)
        self.node_raw_dict[name].name = new_name
        # Repoint every node input that referenced the old name.
        for node_name, node in self.node_raw_dict.items():
            node.input = [new_name if x == old_name else x for x in node.input]

    def add_node_raw_attributes(self, name, attributes):
        """Merge extra attributes into an already-recorded node.

        Args:
            name (string): opr name.
            attributes (dict): attributes info that need to be added.
        """
        for key, value in attributes.items():
            self.node_raw_dict[name].attributes[key] = value

    def add_graph_by_node_raw_list(self):
        """Build graph from all recorded nodes and add graph data to summary."""
        node_raw_list = []
        for key, value in self.node_raw_dict.items():
            node_raw_list.append(
                node(
                    value.name,
                    value.op,
                    value.input,
                    value.outputshape,
                    value.dtype,
                    value.attributes,
                )
            )
        self._get_file_writer().add_graph(graph(node_raw_list))
|
[
"megengine.logger.get_logger"
] |
[((94, 114), 'megengine.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'from megengine.logger import get_logger\n'), ((3050, 3090), 'tensorboardX.proto.summary_pb2.SummaryMetadata', 'SummaryMetadata', ([], {'plugin_data': 'plugin_data'}), '(plugin_data=plugin_data)\n', (3065, 3090), False, 'from tensorboardX.proto.summary_pb2 import Summary, SummaryMetadata\n'), ((2709, 2732), 'tensorboardX.proto.versions_pb2.VersionDef', 'VersionDef', ([], {'producer': '(22)'}), '(producer=22)\n', (2719, 2732), False, 'from tensorboardX.proto.versions_pb2 import VersionDef\n'), ((1118, 1146), 'tensorboardX.proto.tensor_shape_pb2.TensorShapeProto.Dim', 'TensorShapeProto.Dim', ([], {'size': 'd'}), '(size=d)\n', (1138, 1146), False, 'from tensorboardX.proto.tensor_shape_pb2 import TensorShapeProto\n'), ((1642, 1681), 'tensorboardX.proto.attr_value_pb2.AttrValue.ListValue', 'AttrValue.ListValue', ([], {'shape': '[shapeproto]'}), '(shape=[shapeproto])\n', (1661, 1681), False, 'from tensorboardX.proto.attr_value_pb2 import AttrValue\n'), ((3390, 3441), 'tensorboardX.proto.summary_pb2.Summary.Value', 'Summary.Value', ([], {'tag': 'tag', 'metadata': 'smd', 'tensor': 'tensor'}), '(tag=tag, metadata=smd, tensor=tensor)\n', (3403, 3441), False, 'from tensorboardX.proto.summary_pb2 import Summary, SummaryMetadata\n'), ((2988, 3013), 'tensorboardX.proto.plugin_text_pb2.TextPluginData', 'TextPluginData', ([], {'version': '(0)'}), '(version=0)\n', (3002, 3013), False, 'from tensorboardX.proto.plugin_text_pb2 import TextPluginData\n'), ((2803, 2842), 'tensorboardX.proto.step_stats_pb2.DeviceStepStats', 'DeviceStepStats', ([], {'device': '"""/device:CPU:0"""'}), "(device='/device:CPU:0')\n", (2818, 2842), False, 'from tensorboardX.proto.step_stats_pb2 import DeviceStepStats, RunMetadata, StepStats\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import sys
import logging
import megengine.distributed as dist
import torch
import torch.optim as optim
import torch.nn.functional as F
import datasets
import torchvision.transforms as transforms
import shufflenet_v2_pytorch as M
from tensorboardX import SummaryWriter
from devkit.core import (init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler, CrossEntropyLoss)
def main():
    """Parse CLI arguments and launch (optionally distributed) training.

    Initializes the distributed environment via ``init_dist``; when
    ``world_size > 1`` it scales the learning rate linearly and spawns one
    worker process per rank, otherwise it runs a single worker in-process.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="./models", type=str)
    parser.add_argument("-m", "--model", default=None, type=str)
    # Fix: the help string contained "\w", an invalid escape sequence in a
    # non-raw string literal (SyntaxWarning on modern Python); spelled out.
    parser.add_argument('-o', '--output', type=str, required=True,
                        help='set path for checkpoints with tensorboard')
    parser.add_argument("-b", "--batch-size", default=128, type=int)
    parser.add_argument("--learning-rate", default=0.0625, type=float)
    parser.add_argument("--momentum", default=0.9, type=float)
    parser.add_argument("--weight-decay", default=4e-5, type=float)
    parser.add_argument("--steps", default=300000, type=int)
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    parser.add_argument(
        '--port', default=29500, type=int, help='port of server')
    args = parser.parse_args()

    rank, world_size = init_dist(
        backend='nccl', port=args.port)
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    if world_size > 1:
        # Scale learning rate by number of gpus (linear scaling rule).
        args.learning_rate *= world_size
        # Start distributed training: dispatch one sub-process per rank.
        mp.set_start_method("spawn")
        processes = []
        for rank in range(world_size):
            p = mp.Process(target=worker, args=(rank, world_size, args))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    else:
        worker(0, 1, args)
def get_parameters(model):
    """Split model parameters into weight-decay and no-weight-decay groups.

    Multi-dimensional parameters whose name contains "weight" receive weight
    decay; everything else (biases, norm scales, 1-D weights) is excluded.

    Args:
        model: a module exposing ``named_parameters()`` / ``parameters()``.

    Returns:
        list[dict]: two optimizer param groups; the second one carries
        ``weight_decay=0.0``.
    """
    decay_params = []
    no_decay_params = []
    for pname, param in model.named_parameters():
        if not param.requires_grad:
            continue
        wants_decay = pname.find("weight") >= 0 and len(param.shape) > 1
        (decay_params if wants_decay else no_decay_params).append(param)
    # Sanity check: every parameter landed in exactly one group.
    assert len(list(model.parameters())) == len(decay_params) + len(
        no_decay_params
    )
    return [
        dict(params=decay_params),
        dict(params=no_decay_params, weight_decay=0.0),
    ]
def worker(rank, world_size, args):
    """Per-process training entry point.

    Sets up logging (rank 0 only), joins the distributed group when
    ``world_size > 1``, builds the model/optimizer/data pipelines, then runs
    the step-based training loop with periodic evaluation and checkpointing.

    Args:
        rank (int): this process's rank in [0, world_size).
        world_size (int): total number of training processes.
        args: parsed CLI namespace from main().
    """
    # pylint: disable=too-many-statements
    if rank == 0:
        # Rank 0 owns logging; directory name encodes the *global* batch size.
        save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        log_format = '%(asctime)s %(message)s'
        logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                            format=log_format, datefmt='%m/%d %I:%M:%S %p')
        fh = logging.FileHandler(os.path.join(save_dir, 'log.txt'))
        fh.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(fh)
    if world_size > 1:
        # Initialize distributed process group
        logging.info("init distributed process group {} / {}".format(rank, world_size))
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )
    # NOTE(review): this rebinding drops the "b{batch}" suffix used above, so
    # checkpoints land in a different directory than the log file — confirm
    # this is intended.
    save_dir = os.path.join(args.save, args.arch)
    if rank == 0:
        prefixs=['train', 'valid']
        # One SummaryWriter per phase, under args.output/train and /valid.
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}
    # Instantiate the architecture by name from the model module.
    model = getattr(M, args.arch)()
    step_start = 0
    # if args.model:
    #     logging.info("load weights from %s", args.model)
    #     model.load_state_dict(mge.load(args.model))
    #     step_start = int(args.model.split("-")[1].split(".")[0])
    optimizer = optim.SGD(
        get_parameters(model),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )
    # Define train and valid graph
    def train_func(image, label):
        # One training forward/backward; returns loss and top-1/top-5 accuracy.
        # NOTE(review): F is torch.nn.functional here, which has no
        # cross_entropy_with_softmax/accuracy, and torch optimizers have no
        # .backward(); these look like MegEngine APIs — verify which framework
        # this path actually runs under.
        model.train()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        optimizer.backward(loss)  # compute gradients
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5
    def valid_func(image, label):
        # Evaluation forward pass; same metrics as train_func, no backward.
        model.eval()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5
    # Build train and valid datasets
    logging.info("preparing dataset..")
    transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_dataset = datasets.ImageNet(split='train', transform=transform)
    train_sampler = torch.utils.data.RandomSampler(train_dataset)
    train_queue = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        shuffle=False,
        drop_last=True,
        pin_memory=True,
        num_workers=args.workers
    )
    # The loop below is step-based, so pull batches from a persistent iterator.
    train_queue = iter(train_queue)
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    valid_dataset = datasets.ImageNet(split='val', transform=transform)
    valid_sampler = torch.utils.data.SequentialSampler(valid_dataset)
    valid_queue = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=100,
        sampler=valid_sampler,
        shuffle=False,
        drop_last=False,
        num_workers=args.workers
    )
    # Start training
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    total_time = AverageMeter("Time")
    t = time.time()
    best_valid_acc = 0
    for step in range(step_start, args.steps + 1):
        # Linear learning rate decay
        decay = 1.0
        decay = 1 - float(step) / args.steps if step < args.steps else 0
        for param_group in optimizer.param_groups:
            param_group["lr"] = args.learning_rate * decay
        image, label = next(train_queue)
        time_data=time.time()-t
        # image = image.astype("float32")
        # label = label.astype("int32")
        n = image.shape[0]
        optimizer.zero_grad()
        loss, acc1, acc5 = train_func(image, label)
        optimizer.step()
        # NOTE(review): .numpy()[0] assumes the metrics come back as
        # one-element arrays — confirm against train_func's actual returns.
        top1.update(100 * acc1.numpy()[0], n)
        top5.update(100 * acc5.numpy()[0], n)
        objs.update(loss.numpy()[0], n)
        total_time.update(time.time() - t)
        time_iter=time.time()-t
        t = time.time()
        if step % args.report_freq == 0 and rank == 0:
            # Meter values are recovered by parsing AverageMeter.__str__();
            # field 1 is the current value, field 2 ("(avg)") the average.
            logging.info(
                "TRAIN Iter %06d: lr = %f,\tloss = %f,\twc_loss = 1,\tTop-1 err = %f,\tTop-5 err = %f,\tdata_time = %f,\ttrain_time = %f,\tremain_hours=%f",
                step,
                args.learning_rate * decay,
                float(objs.__str__().split()[1]),
                1-float(top1.__str__().split()[1])/100,
                1-float(top5.__str__().split()[1])/100,
                time_data,
                time_iter - time_data,
                time_iter * (args.steps - step) / 3600,
            )
            writers['train'].add_scalar('loss', float(objs.__str__().split()[1]), global_step=step)
            writers['train'].add_scalar('top1_err', 1-float(top1.__str__().split()[1])/100, global_step=step)
            writers['train'].add_scalar('top5_err', 1-float(top5.__str__().split()[1])/100, global_step=step)
            objs.reset()
            top1.reset()
            top5.reset()
            total_time.reset()
        if step % 10000 == 0 and step != 0:
            # Periodic evaluation; infer() returns (loss, top1 %, top5 %).
            loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
            logging.info("TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1-valid_acc/100, 1-valid_acc5/100)
            is_best = valid_acc > best_valid_acc
            best_valid_acc = max(valid_acc, best_valid_acc)
            if rank == 0:
                writers['valid'].add_scalar('loss', loss, global_step=step)
                writers['valid'].add_scalar('top1_err', 1-valid_acc/100, global_step=step)
                writers['valid'].add_scalar('top5_err', 1-valid_acc5/100, global_step=step)
                logging.info("SAVING %06d", step)
                save_checkpoint(save_dir, {
                    'step': step + 1,
                    'model': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_valid_acc,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
def infer(model, data_queue, args):
    """Run one full pass over ``data_queue`` and report averaged metrics.

    Args:
        model: callable ``(image, label) -> (loss, acc1, acc5)`` (e.g. valid_func).
        data_queue: iterable of (image, label) batches.
        args: namespace providing ``report_freq``.

    Returns:
        tuple: (mean loss, mean top-1 accuracy %, mean top-5 accuracy %).
    """
    loss_meter = AverageMeter("Loss")
    acc1_meter = AverageMeter("Acc@1")
    acc5_meter = AverageMeter("Acc@5")
    time_meter = AverageMeter("Time")
    tick = time.time()
    for step, (image, label) in enumerate(data_queue):
        batch = image.shape[0]
        # convert np.uint8 input to the dtypes the model expects
        image = image.astype("float32")
        label = label.astype("int32")
        loss, acc1, acc5 = model(image, label)
        loss_meter.update(loss.numpy()[0], batch)
        acc1_meter.update(100 * acc1.numpy()[0], batch)
        acc5_meter.update(100 * acc5.numpy()[0], batch)
        time_meter.update(time.time() - tick)
        tick = time.time()
        if step % args.report_freq == 0 and dist.get_rank() == 0:
            logging.info(
                "Step %d, %s %s %s %s",
                step,
                loss_meter,
                acc1_meter,
                acc5_meter,
                time_meter,
            )
    return loss_meter.avg, acc1_meter.avg, acc5_meter.avg
class AverageMeter:
    """Tracks the most recent value and the running average of a series."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # Renders as "<name> <val> (<avg>)" using the configured format spec.
        template = "{name} {val%s} ({avg%s})" % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
# Entry point: run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
[
"megengine.distributed.is_distributed",
"megengine.distributed.get_rank",
"megengine.distributed.get_world_size",
"megengine.distributed.all_reduce_sum",
"megengine.distributed.init_process_group"
] |
[((2327, 2352), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2350, 2352), False, 'import argparse\n'), ((3419, 3460), 'devkit.core.init_dist', 'init_dist', ([], {'backend': '"""nccl"""', 'port': 'args.port'}), "(backend='nccl', port=args.port)\n", (3428, 3460), False, 'from devkit.core import init_dist, broadcast_params, average_gradients, load_state_ckpt, load_state, save_checkpoint, LRScheduler, CrossEntropyLoss\n'), ((5720, 5754), 'os.path.join', 'os.path.join', (['args.save', 'args.arch'], {}), '(args.save, args.arch)\n', (5732, 5754), False, 'import os\n'), ((7485, 7520), 'logging.info', 'logging.info', (['"""preparing dataset.."""'], {}), "('preparing dataset..')\n", (7497, 7520), False, 'import logging\n'), ((7865, 7918), 'datasets.ImageNet', 'datasets.ImageNet', ([], {'split': '"""train"""', 'transform': 'transform'}), "(split='train', transform=transform)\n", (7882, 7918), False, 'import datasets\n'), ((7939, 7984), 'torch.utils.data.RandomSampler', 'torch.utils.data.RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (7969, 7984), False, 'import torch\n'), ((8003, 8178), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'sampler': 'train_sampler', 'shuffle': '(False)', 'drop_last': '(True)', 'pin_memory': '(True)', 'num_workers': 'args.workers'}), '(train_dataset, batch_size=args.batch_size,\n sampler=train_sampler, shuffle=False, drop_last=True, pin_memory=True,\n num_workers=args.workers)\n', (8030, 8178), False, 'import torch\n'), ((8518, 8569), 'datasets.ImageNet', 'datasets.ImageNet', ([], {'split': '"""val"""', 'transform': 'transform'}), "(split='val', transform=transform)\n", (8535, 8569), False, 'import datasets\n'), ((8590, 8639), 'torch.utils.data.SequentialSampler', 'torch.utils.data.SequentialSampler', (['valid_dataset'], {}), '(valid_dataset)\n', (8624, 8639), False, 'import torch\n'), ((8658, 8802), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(100)', 'sampler': 'valid_sampler', 'shuffle': '(False)', 'drop_last': '(False)', 'num_workers': 'args.workers'}), '(valid_dataset, batch_size=100, sampler=\n valid_sampler, shuffle=False, drop_last=False, num_workers=args.workers)\n', (8685, 8802), False, 'import torch\n'), ((9019, 9030), 'time.time', 'time.time', ([], {}), '()\n', (9028, 9030), False, 'import time\n'), ((12105, 12116), 'time.time', 'time.time', ([], {}), '()\n', (12114, 12116), False, 'import time\n'), ((3482, 3509), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (3496, 3509), False, 'import os\n'), ((3519, 3543), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (3530, 3543), False, 'import os\n'), ((3726, 3754), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (3745, 3754), True, 'import multiprocessing as mp\n'), ((5055, 5166), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (5074, 5166), False, 'import logging\n'), ((5523, 5636), 'megengine.distributed.init_process_group', 'dist.init_process_group', ([], {'master_ip': '"""localhost"""', 'master_port': '(23456)', 'world_size': 'world_size', 'rank': 'rank', 'dev': 'rank'}), "(master_ip='localhost', master_port=23456,\n world_size=world_size, rank=rank, dev=rank)\n", (5546, 5636), True, 'import megengine.distributed as dist\n'), ((6470, 6531), 'torch.nn.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (6498, 6531), True, 'import torch.nn.functional as F\n'), ((6553, 6586), 'torch.nn.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), 
'(logits, label, (1, 5))\n', (6563, 6586), True, 'import torch.nn.functional as F\n'), ((6652, 6673), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (6671, 6673), True, 'import megengine.distributed as dist\n'), ((7034, 7095), 'torch.nn.functional.cross_entropy_with_softmax', 'F.cross_entropy_with_softmax', (['logits', 'label'], {'label_smooth': '(0.1)'}), '(logits, label, label_smooth=0.1)\n', (7062, 7095), True, 'import torch.nn.functional as F\n'), ((7117, 7150), 'torch.nn.functional.accuracy', 'F.accuracy', (['logits', 'label', '(1, 5)'], {}), '(logits, label, (1, 5))\n', (7127, 7150), True, 'import torch.nn.functional as F\n'), ((7162, 7183), 'megengine.distributed.is_distributed', 'dist.is_distributed', ([], {}), '()\n', (7181, 7183), True, 'import megengine.distributed as dist\n'), ((9858, 9869), 'time.time', 'time.time', ([], {}), '()\n', (9867, 9869), False, 'import time\n'), ((12544, 12555), 'time.time', 'time.time', ([], {}), '()\n', (12553, 12555), False, 'import time\n'), ((3833, 3889), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'worker', 'args': '(rank, world_size, args)'}), '(target=worker, args=(rank, world_size, args))\n', (3843, 3889), True, 'import multiprocessing as mp\n'), ((4939, 4963), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4953, 4963), False, 'import os\n'), ((4977, 4998), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4988, 4998), False, 'import os\n'), ((5223, 5256), 'os.path.join', 'os.path.join', (['save_dir', '"""log.txt"""'], {}), "(save_dir, 'log.txt')\n", (5235, 5256), False, 'import os\n'), ((5282, 5311), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (5299, 5311), False, 'import logging\n'), ((7567, 7600), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (7595, 7600), True, 'import torchvision.transforms as transforms\n'), ((7610, 7643), 
'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (7641, 7643), True, 'import torchvision.transforms as transforms\n'), ((7653, 7721), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.4)', 'contrast': '(0.4)', 'saturation': '(0.4)'}), '(brightness=0.4, contrast=0.4, saturation=0.4)\n', (7675, 7721), True, 'import torchvision.transforms as transforms\n'), ((7731, 7752), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7750, 7752), True, 'import torchvision.transforms as transforms\n'), ((7762, 7837), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7782, 7837), True, 'import torchvision.transforms as transforms\n'), ((8316, 8338), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (8333, 8338), True, 'import torchvision.transforms as transforms\n'), ((8348, 8374), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (8369, 8374), True, 'import torchvision.transforms as transforms\n'), ((8384, 8405), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8403, 8405), True, 'import torchvision.transforms as transforms\n'), ((8415, 8490), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (8435, 8490), True, 'import torchvision.transforms as transforms\n'), ((9406, 9417), 'time.time', 'time.time', ([], {}), '()\n', (9415, 9417), False, 'import time\n'), ((9832, 9843), 'time.time', 'time.time', ([], {}), '()\n', (9841, 9843), False, 'import time\n'), ((11038, 11172), 'logging.info', 'logging.info', (['"""TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f"""', 'step', 'loss', 
'(1 - valid_acc / 100)', '(1 - valid_acc5 / 100)'], {}), "('TEST Iter %06d: loss = %f,\\tTop-1 err = %f,\\tTop-5 err = %f',\n step, loss, 1 - valid_acc / 100, 1 - valid_acc5 / 100)\n", (11050, 11172), False, 'import logging\n'), ((12635, 12707), 'logging.info', 'logging.info', (['"""Step %d, %s %s %s %s"""', 'step', 'objs', 'top1', 'top5', 'total_time'], {}), "('Step %d, %s %s %s %s', step, objs, top1, top5, total_time)\n", (12647, 12707), False, 'import logging\n'), ((5321, 5340), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5338, 5340), False, 'import logging\n'), ((5850, 5883), 'os.path.join', 'os.path.join', (['args.output', 'prefix'], {}), '(args.output, prefix)\n', (5862, 5883), False, 'import os\n'), ((6713, 6738), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (6732, 6738), True, 'import megengine.distributed as dist\n'), ((6741, 6762), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6760, 6762), True, 'import megengine.distributed as dist\n'), ((6782, 6807), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (6801, 6807), True, 'import megengine.distributed as dist\n'), ((6810, 6831), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6829, 6831), True, 'import megengine.distributed as dist\n'), ((6851, 6876), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (6870, 6876), True, 'import megengine.distributed as dist\n'), ((6879, 6900), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6898, 6900), True, 'import megengine.distributed as dist\n'), ((7223, 7248), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['loss'], {}), '(loss)\n', (7242, 7248), True, 'import megengine.distributed as dist\n'), ((7251, 7272), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7270, 7272), 
True, 'import megengine.distributed as dist\n'), ((7292, 7317), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc1'], {}), '(acc1)\n', (7311, 7317), True, 'import megengine.distributed as dist\n'), ((7320, 7341), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7339, 7341), True, 'import megengine.distributed as dist\n'), ((7361, 7386), 'megengine.distributed.all_reduce_sum', 'dist.all_reduce_sum', (['acc5'], {}), '(acc5)\n', (7380, 7386), True, 'import megengine.distributed as dist\n'), ((7389, 7410), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7408, 7410), True, 'import megengine.distributed as dist\n'), ((9797, 9808), 'time.time', 'time.time', ([], {}), '()\n', (9806, 9808), False, 'import time\n'), ((11574, 11607), 'logging.info', 'logging.info', (['"""SAVING %06d"""', 'step'], {}), "('SAVING %06d', step)\n", (11586, 11607), False, 'import logging\n'), ((12515, 12526), 'time.time', 'time.time', ([], {}), '()\n', (12524, 12526), False, 'import time\n'), ((12601, 12616), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (12614, 12616), True, 'import megengine.distributed as dist\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=import-error,no-name-in-module,no-member
from typing import List, Union
import megengine as mge
from megengine.traced_module import TracedModule
from ..backend.ir_to_onnx.onnx_converter import OnnxConverter
from ..converter_ir.ir_quantizer import IRQuantizer
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.tm_to_ir import TM_FrontEnd
from ..frontend.tm_to_ir.tm_utils import _update_inputs_qparams
def tracedmodule_to_onnx(
    traced_module,
    output="out.onnx",
    *,
    graph_name="graph",
    opset=8,
    outspec=None,
    input_data_type: str = None,
    input_scales: Union[float, List[float]] = None,
    input_zero_points: Union[int, List[int]] = None,
    require_quantize=False,
    param_fake_quant=False,
    quantize_file_path="quant_params.json",
):
    """
    Convert a megengine :class:`TracedModule` to ONNX and save the result
    to file ``output``.

    :param traced_module: a :class:`TracedModule` instance, or the file path
        of a serialized traced module (loaded with ``mge.load``).
    :param output: the filename used for the saved ONNX model.
    :param graph_name: the name of the ONNX graph.
    :param opset: opset version of the ONNX model.
    :param outspec: output spec forwarded to the TracedModule frontend.
    :param input_data_type: data type used to update the input qparams.
    :param input_scales: quantization scale(s) for the network inputs.
    :param input_zero_points: quantization zero point(s) for the network inputs.
    :param require_quantize: must be ``False``; exporting a truly quantized
        model is not supported by this converter.
    :param param_fake_quant: whether to fake-quantize parameters when saving
        quantization params.
    :param quantize_file_path: where to dump quantization params when the
        traced module contains QAT modules.
    """
    if isinstance(traced_module, str):
        traced_module = mge.load(traced_module)
    assert isinstance(
        traced_module, TracedModule
    ), "Input should be a traced module or a path of traced module."
    _update_inputs_qparams(
        traced_module, input_data_type, input_scales, input_zero_points
    )
    # Fixed: the message used to say "Caffe" — a copy-paste from the caffe
    # converter; this is the ONNX export path.
    assert not require_quantize, "ONNX converter does not support quantized model."
    tm_resolver = TM_FrontEnd(traced_module, outspec=outspec)
    irgraph = tm_resolver.resolve()

    transformer_options = [
        TransformerRule.REMOVE_RESHAPE_REALTED_OP,
        TransformerRule.REMOVE_UNRELATED_IROP,
        TransformerRule.EXPAND_CONVRELU,
    ]
    transformer = IRTransform(transformer_options)
    transformed_irgraph = transformer.transform(irgraph)

    quantizer = IRQuantizer(
        require_quantize=require_quantize, param_fake_quant=param_fake_quant
    )
    # collect quantize params before conversion so the converter can use them
    if tm_resolver.has_qat:
        quantizer.save_quantize_params(transformed_irgraph)

    converter = OnnxConverter(transformed_irgraph, opset, graph_name, quantizer)
    model = converter.convert()

    if tm_resolver.has_qat:
        quantizer.dump_quant_param(path=quantize_file_path)

    assert isinstance(output, str), "onnx_fpath must be string"
    with open(output, "wb") as fout:
        fout.write(model.SerializeToString())
|
[
"megengine.load"
] |
[((1616, 1639), 'megengine.load', 'mge.load', (['traced_module'], {}), '(traced_module)\n', (1624, 1639), True, 'import megengine as mge\n')]
|
import numpy as np
from megengine import tensor
def _default_compare_fn(x, y):
np.testing.assert_allclose(x.numpy(), y, rtol=1e-6)
def opr_test(cases, func, compare_fn=_default_compare_fn, ref_fn=None, **kwargs):
    """
    Run ``func`` on the first test case and compare against the expectation.

    :param cases: non-empty list of dict cases. Each dict must contain
        ``"input"`` and, unless ``ref_fn`` is given, ``"output"``. Use a
        list/tuple for multiple inputs or outputs.
    :param func: the callable under test.
    :param compare_fn: comparison function applied to each
        (result, expected) pair; defaults to an
        ``np.testing.assert_allclose`` wrapper.
    :param ref_fn: optional callable producing the expected output from the
        inputs.

    Examples:

    .. code-block::

        dtype = np.float32
        cases = [{"input": [10, 20]}, {"input": [20, 30]}]
        opr_test(cases,
                 F.eye,
                 ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
                 dtype=dtype)
    """

    def _as_tuple(value):
        # normalize a scalar case entry into a 1-tuple
        return value if isinstance(value, (tuple, list)) else (value,)

    def _check(results, expected):
        for got, want in zip(_as_tuple(results), expected):
            compare_fn(got, want)

    def _case_params(idx):
        case = cases[idx]
        inputs = case.get("input", None)
        expected = case.get("output", None)
        if inputs is None:
            raise ValueError("the test case should have input")
        inputs = _as_tuple(inputs)
        if ref_fn is not None and callable(ref_fn):
            expected = ref_fn(*inputs)
        if expected is None:
            raise ValueError("the test case should have output or reference function")
        return inputs, _as_tuple(expected)

    if len(cases) == 0:
        raise ValueError("should give one case at least")
    if not callable(func):
        raise ValueError("the input func should be callable")
    inputs, expected = _case_params(0)
    results = func(*[tensor(item) for item in inputs], **kwargs)
    _check(results, expected)
|
[
"megengine.tensor"
] |
[((2055, 2067), 'megengine.tensor', 'tensor', (['inpi'], {}), '(inpi)\n', (2061, 2067), False, 'from megengine import tensor\n')]
|
from itertools import product
import numpy as np
from megengine import tensor
from megengine.module import (
Conv2d,
ConvBn2d,
ConvRelu2d,
DequantStub,
Module,
QuantStub,
)
from megengine.quantization.quantize import disable_fake_quant, quantize_qat
def test_qat_convbn2d():
    """QAT ConvBn2d must match the float module in both train and eval mode."""
    in_channels = 32
    out_channels = 64
    kernel_size = 3
    for groups, bias in product([1, 4], [True, False]):
        float_module = ConvBn2d(
            in_channels, out_channels, kernel_size, groups=groups, bias=bias
        )
        float_module.train()
        qat_module = quantize_qat(float_module, inplace=False)
        # compare raw float behaviour only, so fake-quant is switched off
        disable_fake_quant(qat_module)
        inputs = tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
        expected = float_module(inputs)
        actual = qat_module(inputs)
        np.testing.assert_allclose(expected.numpy(), actual.numpy(), atol=5e-6)
        # BN running statistics must stay in sync between the two modules
        np.testing.assert_allclose(
            float_module.bn.running_mean.numpy(),
            qat_module.bn.running_mean.numpy(),
            atol=5e-8,
        )
        np.testing.assert_allclose(
            float_module.bn.running_var.numpy(),
            qat_module.bn.running_var.numpy(),
            atol=5e-7,
        )
        float_module.eval()
        expected = float_module(inputs)
        qat_module.eval()
        actual = qat_module(inputs)
        np.testing.assert_allclose(expected.numpy(), actual.numpy(), atol=5e-6)
def test_qat_conv():
    """QAT conversion of a small conv network must not change its outputs."""
    in_channels = 32
    out_channels = 64
    kernel_size = 3

    class Net(Module):
        """quant -> conv -> conv+relu -> dequant toy network."""

        def __init__(self, groups, bias):
            super().__init__()
            self.quant = QuantStub()
            self.dequant = DequantStub()
            self.conv = Conv2d(
                in_channels, out_channels, kernel_size, groups=groups, bias=bias
            )
            self.conv_relu = ConvRelu2d(
                out_channels, in_channels, kernel_size, groups=groups, bias=bias
            )

        def forward(self, inp):
            x = self.quant(inp)
            x = self.conv(x)
            x = self.conv_relu(x)
            return self.dequant(x)

    inputs = tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
    for groups, bias in product([1, 4], [True, False]):
        float_net = Net(groups, bias)
        float_net.train()
        qat_net = quantize_qat(float_net, inplace=False)
        # fake-quant off: compare pure float numerics
        disable_fake_quant(qat_net)
        expected = float_net(inputs)
        actual = qat_net(inputs)
        np.testing.assert_allclose(expected.numpy(), actual.numpy())
        float_net.eval()
        expected = float_net(inputs)
        qat_net.eval()
        actual = qat_net(inputs)
        np.testing.assert_allclose(expected.numpy(), actual.numpy())
|
[
"megengine.quantization.quantize.quantize_qat",
"megengine.module.QuantStub",
"megengine.module.ConvBn2d",
"megengine.quantization.quantize.disable_fake_quant",
"megengine.module.DequantStub",
"megengine.module.ConvRelu2d",
"megengine.module.Conv2d"
] |
[((390, 420), 'itertools.product', 'product', (['[1, 4]', '[True, False]'], {}), '([1, 4], [True, False])\n', (397, 420), False, 'from itertools import product\n'), ((2349, 2379), 'itertools.product', 'product', (['[1, 4]', '[True, False]'], {}), '([1, 4], [True, False])\n', (2356, 2379), False, 'from itertools import product\n'), ((439, 513), 'megengine.module.ConvBn2d', 'ConvBn2d', (['in_channels', 'out_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, groups=groups, bias=bias)\n', (447, 513), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((580, 615), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['module'], {'inplace': '(False)'}), '(module, inplace=False)\n', (592, 615), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((624, 654), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_module'], {}), '(qat_module)\n', (642, 654), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((2455, 2487), 'megengine.quantization.quantize.quantize_qat', 'quantize_qat', (['net'], {'inplace': '(False)'}), '(net, inplace=False)\n', (2467, 2487), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((2496, 2523), 'megengine.quantization.quantize.disable_fake_quant', 'disable_fake_quant', (['qat_net'], {}), '(qat_net)\n', (2514, 2523), False, 'from megengine.quantization.quantize import disable_fake_quant, quantize_qat\n'), ((1731, 1742), 'megengine.module.QuantStub', 'QuantStub', ([], {}), '()\n', (1740, 1742), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((1770, 1783), 'megengine.module.DequantStub', 'DequantStub', ([], {}), '()\n', (1781, 1783), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, 
QuantStub\n'), ((1808, 1880), 'megengine.module.Conv2d', 'Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, groups=groups, bias=bias)\n', (1814, 1880), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((1940, 2016), 'megengine.module.ConvRelu2d', 'ConvRelu2d', (['out_channels', 'in_channels', 'kernel_size'], {'groups': 'groups', 'bias': 'bias'}), '(out_channels, in_channels, kernel_size, groups=groups, bias=bias)\n', (1950, 2016), False, 'from megengine.module import Conv2d, ConvBn2d, ConvRelu2d, DequantStub, Module, QuantStub\n'), ((2265, 2304), 'numpy.random.randn', 'np.random.randn', (['(4)', 'in_channels', '(32)', '(32)'], {}), '(4, in_channels, 32, 32)\n', (2280, 2304), True, 'import numpy as np\n'), ((679, 718), 'numpy.random.randn', 'np.random.randn', (['(4)', 'in_channels', '(32)', '(32)'], {}), '(4, in_channels, 32, 32)\n', (694, 718), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2020 <NAME>
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
"""LARS optimizer
References: https://github.com/rwightman/pytorch-image-models/blob/master/timm/optim/lars.py
"""
import os
from typing import Iterable, Union
import megengine.functional as F
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class LARS(Optimizer):
    r"""Implements LARS algorithm.

    LARS is proposed in `"Large Batch Optimization for Deep Learning: Training BERT in 76 minutes"
    <https://arxiv.org/abs/1904.00962>`_.

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups.
        lr: learning rate.
        momentum: momentum factor. Default: ``0.0``
        nesterov: enables Nesterov momentum. Default: ``False``
        weight_decay: weight decay (L2 penalty). Default: ``0.0``
        always_adapt: apply adaptive lr to ``0.0`` weight decay parameter. Default: ``False``
    """

    def __init__(
        self,
        params: Union[Iterable[Parameter], dict],
        lr: float,
        momentum: float = 0.0,
        nesterov: bool = False,
        weight_decay: float = 0.0,
        always_adapt: bool = False,
    ):
        # validate hyper-parameters before handing them to the base Optimizer
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if nesterov and momentum <= 0:
            raise ValueError("Nesterov momentum requires a momentum")
        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
        super().__init__(params, defaults)
        self.nesterov = nesterov
        self.always_adapt = always_adapt
        # keep raw dtypes in _updates; scalars are converted explicitly there
        self._disable_type_convert = True

    def _create_state(self, param_group):
        # a momentum buffer per parameter is only needed when momentum != 0
        if param_group["momentum"] != 0.0:
            for param in param_group["params"]:
                self._add_state(param, "momentum_buffer")

    def _updates(self, param_group):
        """Apply one LARS step to every parameter in ``param_group``."""
        lr = param_group["lr"]
        weight_decay = param_group["weight_decay"]
        momentum = param_group["momentum"]
        # since `convert_inputs` is disabled for param updates,
        # scalars must be explicitly converted to tensors
        _lr = tensor(lr)
        _weight_decay = tensor(weight_decay)
        _momentum = tensor(momentum)
        # tensor constants: one, one-half (for sqrt via power), zero
        c1, c05, c0 = map(tensor, (1.0, 0.5, 0.0))

        def norm(vec):
            # L2 norm computed as sqrt(sum(v*v)) via the ** 0.5 power
            return F.sum(vec * vec) ** c05

        # opt-in fused in-place update path, toggled by environment variable
        inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
        if inplace_mode:
            _neg_lr = tensor(-lr)

        for param in param_group["params"]:
            if param.grad is None:
                continue

            grad = param.grad
            if weight_decay != 0.0:
                # L2 penalty folded into the gradient
                grad = grad + param * _weight_decay

            p_norm = norm(param.flatten())

            if inplace_mode:
                # in-place variant: same math as below, but buffers and the
                # parameter are updated via _inplace_add_
                if momentum != 0.0:
                    v = self._state[param]["momentum_buffer"]
                    # v = momentum * v + grad
                    _inplace_add_(v, grad, alpha=_momentum, beta=c1)
                    if self.nesterov:
                        grad = grad + v * _momentum
                    else:
                        grad = v
                d_norm = norm(grad.flatten())
                # LARS trust ratio ||param|| / ||update||; falls back to 1
                # when adaptation is disabled or either norm is zero
                trust_ratio = (
                    p_norm / d_norm
                    if (self.always_adapt or weight_decay > 0) and p_norm > c0 and d_norm > c0
                    else c1
                )
                # param = param - lr * trust_ratio * grad
                _inplace_add_(param, grad, alpha=c1, beta=_neg_lr * trust_ratio)
                continue

            if momentum != 0.0:
                # v = momentum * v + grad (momentum buffer mutated in place)
                v = self._state[param]["momentum_buffer"]
                v *= _momentum
                v += grad
                if self.nesterov:
                    grad = grad + v * _momentum
                else:
                    grad = v
            d_norm = norm(grad.flatten())
            # LARS trust ratio, see in-place branch above
            trust_ratio = (
                p_norm / d_norm
                if (self.always_adapt or weight_decay > 0) and p_norm > c0 and d_norm > c0
                else c1
            )
            param -= _lr * trust_ratio * grad
|
[
"megengine.tensor",
"megengine.functional.inplace._inplace_add_",
"megengine.functional.sum"
] |
[((2538, 2548), 'megengine.tensor', 'tensor', (['lr'], {}), '(lr)\n', (2544, 2548), False, 'from megengine import Parameter, tensor\n'), ((2573, 2593), 'megengine.tensor', 'tensor', (['weight_decay'], {}), '(weight_decay)\n', (2579, 2593), False, 'from megengine import Parameter, tensor\n'), ((2614, 2630), 'megengine.tensor', 'tensor', (['momentum'], {}), '(momentum)\n', (2620, 2630), False, 'from megengine import Parameter, tensor\n'), ((2778, 2820), 'os.getenv', 'os.getenv', (['"""MEGENGINE_INPLACE_UPDATE"""', '"""0"""'], {}), "('MEGENGINE_INPLACE_UPDATE', '0')\n", (2787, 2820), False, 'import os\n'), ((2869, 2880), 'megengine.tensor', 'tensor', (['(-lr)'], {}), '(-lr)\n', (2875, 2880), False, 'from megengine import Parameter, tensor\n'), ((2726, 2742), 'megengine.functional.sum', 'F.sum', (['(vec * vec)'], {}), '(vec * vec)\n', (2731, 2742), True, 'import megengine.functional as F\n'), ((3766, 3830), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['param', 'grad'], {'alpha': 'c1', 'beta': '(_neg_lr * trust_ratio)'}), '(param, grad, alpha=c1, beta=_neg_lr * trust_ratio)\n', (3779, 3830), False, 'from megengine.functional.inplace import _inplace_add_\n'), ((3297, 3345), 'megengine.functional.inplace._inplace_add_', '_inplace_add_', (['v', 'grad'], {'alpha': '_momentum', 'beta': 'c1'}), '(v, grad, alpha=_momentum, beta=c1)\n', (3310, 3345), False, 'from megengine.functional.inplace import _inplace_add_\n')]
|
import os
import megengine as mge
import megengine.functional as F
import argparse
import numpy as np
import cv2
from nets import Model
def load_model(model_path):
    """Load a pretrained CREStereo checkpoint and return the model in eval mode."""
    print("Loading model:", os.path.abspath(model_path))
    checkpoint = mge.load(model_path)
    model = Model(max_disp=256, mixed_precision=False, test_mode=True)
    model.load_state_dict(checkpoint["state_dict"], strict=True)
    model.eval()
    return model
def inference(left, right, model, n_iter=20):
    """Predict a disparity map for a rectified stereo pair.

    Runs the model coarse-to-fine: a half-resolution pass produces an
    initial flow that seeds the full-resolution pass. Returns the disparity
    as a numpy array.
    """
    print("Model Forwarding...")
    # HWC uint8 images -> NCHW float32 tensors
    tensors = []
    for img in (left, right):
        chw = np.ascontiguousarray(img.transpose(2, 0, 1)[None, :, :, :])
        tensors.append(mge.tensor(chw).astype("float32"))
    imgL, imgR = tensors
    half_size = (imgL.shape[2] // 2, imgL.shape[3] // 2)
    imgL_dw2 = F.nn.interpolate(
        imgL, size=half_size, mode="bilinear", align_corners=True,
    )
    imgR_dw2 = F.nn.interpolate(
        imgR, size=half_size, mode="bilinear", align_corners=True,
    )
    # coarse pass at half resolution seeds the fine pass
    pred_flow_dw2 = model(imgL_dw2, imgR_dw2, iters=n_iter, flow_init=None)
    pred_flow = model(imgL, imgR, iters=n_iter, flow_init=pred_flow_dw2)
    # channel 0 of the flow is the horizontal component, i.e. the disparity
    return F.squeeze(pred_flow[:, 0, :, :]).numpy()
# Command-line demo: load a CREStereo model, run it on one stereo pair and
# write a colorized disparity map.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="A demo to run CREStereo.")
    parser.add_argument(
        "--model_path",
        default="crestereo_eth3d.mge",
        help="The path of pre-trained MegEngine model.",
    )
    parser.add_argument(
        "--left", default="img/test/left.png", help="The path of left image."
    )
    parser.add_argument(
        "--right", default="img/test/right.png", help="The path of right image."
    )
    parser.add_argument(
        "--size",
        default="1024x1536",
        help="The image size for inference. Te default setting is 1024x1536. \
            To evaluate on ETH3D Benchmark, use 768x1024 instead.",
    )
    parser.add_argument(
        "--output", default="disparity.png", help="The path of output disparity."
    )
    args = parser.parse_args()

    # fail fast on missing files before loading the model
    assert os.path.exists(args.model_path), "The model path do not exist."
    assert os.path.exists(args.left), "The left image path do not exist."
    assert os.path.exists(args.right), "The right image path do not exist."

    model_func = load_model(args.model_path)
    left = cv2.imread(args.left)
    right = cv2.imread(args.right)
    assert left.shape == right.shape, "The input images have inconsistent shapes."
    in_h, in_w = left.shape[:2]

    # resize to the requested inference resolution ("HxW" string)
    print("Images resized:", args.size)
    eval_h, eval_w = [int(e) for e in args.size.split("x")]
    left_img = cv2.resize(left, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)
    right_img = cv2.resize(right, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)

    pred = inference(left_img, right_img, model_func, n_iter=20)

    # rescale disparity back to the original width; values scale with width
    t = float(in_w) / float(eval_w)
    disp = cv2.resize(pred, (in_w, in_h), interpolation=cv2.INTER_LINEAR) * t
    # min-max normalize to 0-255 and apply a color map for visualization
    disp_vis = (disp - disp.min()) / (disp.max() - disp.min()) * 255.0
    disp_vis = disp_vis.astype("uint8")
    disp_vis = cv2.applyColorMap(disp_vis, cv2.COLORMAP_INFERNO)

    # make sure the output directory exists before writing
    parent_path = os.path.abspath(os.path.join(args.output, os.pardir))
    if not os.path.exists(parent_path):
        os.makedirs(parent_path)
    cv2.imwrite(args.output, disp_vis)
    print("Done! Result path:", os.path.abspath(args.output))
|
[
"megengine.functional.nn.interpolate",
"megengine.functional.squeeze",
"megengine.tensor",
"megengine.load"
] |
[((247, 267), 'megengine.load', 'mge.load', (['model_path'], {}), '(model_path)\n', (255, 267), True, 'import megengine as mge\n'), ((280, 338), 'nets.Model', 'Model', ([], {'max_disp': '(256)', 'mixed_precision': '(False)', 'test_mode': '(True)'}), '(max_disp=256, mixed_precision=False, test_mode=True)\n', (285, 338), False, 'from nets import Model\n'), ((608, 649), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgL[None, :, :, :]'], {}), '(imgL[None, :, :, :])\n', (628, 649), True, 'import numpy as np\n'), ((661, 702), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgR[None, :, :, :]'], {}), '(imgR[None, :, :, :])\n', (681, 702), True, 'import numpy as np\n'), ((812, 923), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['imgL'], {'size': '(imgL.shape[2] // 2, imgL.shape[3] // 2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(imgL, size=(imgL.shape[2] // 2, imgL.shape[3] // 2), mode=\n 'bilinear', align_corners=True)\n", (828, 923), True, 'import megengine.functional as F\n'), ((973, 1084), 'megengine.functional.nn.interpolate', 'F.nn.interpolate', (['imgR'], {'size': '(imgL.shape[2] // 2, imgL.shape[3] // 2)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(imgR, size=(imgL.shape[2] // 2, imgL.shape[3] // 2), mode=\n 'bilinear', align_corners=True)\n", (989, 1084), True, 'import megengine.functional as F\n'), ((1390, 1453), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A demo to run CREStereo."""'}), "(description='A demo to run CREStereo.')\n", (1413, 1453), False, 'import argparse\n'), ((2219, 2250), 'os.path.exists', 'os.path.exists', (['args.model_path'], {}), '(args.model_path)\n', (2233, 2250), False, 'import os\n'), ((2294, 2319), 'os.path.exists', 'os.path.exists', (['args.left'], {}), '(args.left)\n', (2308, 2319), False, 'import os\n'), ((2368, 2394), 'os.path.exists', 'os.path.exists', (['args.right'], {}), '(args.right)\n', (2382, 2394), False, 'import os\n'), ((2490, 
2511), 'cv2.imread', 'cv2.imread', (['args.left'], {}), '(args.left)\n', (2500, 2511), False, 'import cv2\n'), ((2524, 2546), 'cv2.imread', 'cv2.imread', (['args.right'], {}), '(args.right)\n', (2534, 2546), False, 'import cv2\n'), ((2780, 2846), 'cv2.resize', 'cv2.resize', (['left', '(eval_w, eval_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(left, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)\n', (2790, 2846), False, 'import cv2\n'), ((2863, 2930), 'cv2.resize', 'cv2.resize', (['right', '(eval_w, eval_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(right, (eval_w, eval_h), interpolation=cv2.INTER_LINEAR)\n', (2873, 2930), False, 'import cv2\n'), ((3239, 3288), 'cv2.applyColorMap', 'cv2.applyColorMap', (['disp_vis', 'cv2.COLORMAP_INFERNO'], {}), '(disp_vis, cv2.COLORMAP_INFERNO)\n', (3256, 3288), False, 'import cv2\n'), ((3439, 3473), 'cv2.imwrite', 'cv2.imwrite', (['args.output', 'disp_vis'], {}), '(args.output, disp_vis)\n', (3450, 3473), False, 'import cv2\n'), ((196, 223), 'os.path.abspath', 'os.path.abspath', (['model_path'], {}), '(model_path)\n', (211, 223), False, 'import os\n'), ((3045, 3107), 'cv2.resize', 'cv2.resize', (['pred', '(in_w, in_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pred, (in_w, in_h), interpolation=cv2.INTER_LINEAR)\n', (3055, 3107), False, 'import cv2\n'), ((3324, 3360), 'os.path.join', 'os.path.join', (['args.output', 'os.pardir'], {}), '(args.output, os.pardir)\n', (3336, 3360), False, 'import os\n'), ((3373, 3400), 'os.path.exists', 'os.path.exists', (['parent_path'], {}), '(parent_path)\n', (3387, 3400), False, 'import os\n'), ((3410, 3434), 'os.makedirs', 'os.makedirs', (['parent_path'], {}), '(parent_path)\n', (3421, 3434), False, 'import os\n'), ((3506, 3534), 'os.path.abspath', 'os.path.abspath', (['args.output'], {}), '(args.output)\n', (3521, 3534), False, 'import os\n'), ((715, 731), 'megengine.tensor', 'mge.tensor', (['imgL'], {}), '(imgL)\n', (725, 731), True, 'import megengine as mge\n'), ((761, 777), 
'megengine.tensor', 'mge.tensor', (['imgR'], {}), '(imgR)\n', (771, 777), True, 'import megengine as mge\n'), ((1285, 1317), 'megengine.functional.squeeze', 'F.squeeze', (['pred_flow[:, 0, :, :]'], {}), '(pred_flow[:, 0, :, :])\n', (1294, 1317), True, 'import megengine.functional as F\n')]
|
import os
import cv2
import argparse
import warnings
import megengine as mge
import megengine.functional as F
# RIFE frame-interpolation CLI: given two frames, synthesize in-between frames.
# Suppress framework warnings so CLI output stays readable.
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
# Without --ratio, 2**exp - 1 intermediate frames are generated by recursive doubling.
parser.add_argument('--exp', default=4, type=int)
parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
parser.add_argument('--rthreshold', default=0.02, type=float, help='returns image when actual ratio falls in given range threshold')
parser.add_argument('--rmaxcycles', default=8, type=int, help='limit max number of bisectional cycles')
parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
args = parser.parse_args()
# Model import is deferred until after argument parsing (keeps `--help` fast).
from model.RIFE import Model
model = Model()
model.load_model(args.modelDir, -1)
print("Loaded model")
model.eval()
# Load both frames as NCHW tensors.  EXR input is kept in its native HDR
# range; other formats are scaled to [0, 1].
if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
    img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img0 = F.expand_dims(mge.Tensor(img0.transpose(2, 0, 1)), 0)
    img1 = F.expand_dims(mge.Tensor(img1.transpose(2, 0, 1)), 0)
else:
    img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
    img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
    img0 = F.expand_dims(mge.Tensor(img0.transpose(2, 0, 1)) / 255. , 0)
    img1 = F.expand_dims(mge.Tensor(img1.transpose(2, 0, 1)) / 255. , 0)
# Pad height/width up to the next multiple of 32 (network stride requirement);
# the padding is cropped off again with [:h, :w] when writing output.
n, c, h, w = img0.shape
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = ((0, 0), (0, 0), (0, ph - h), (0, pw - w))
img0 = F.nn.pad(img0, padding)
img1 = F.nn.pad(img1, padding)
if args.ratio:
    # Single-frame mode: bisect toward the requested temporal ratio until it
    # falls within +/- rthreshold/2, or rmaxcycles iterations are exhausted.
    img_list = [img0]
    img0_ratio = 0.0
    img1_ratio = 1.0
    if args.ratio <= img0_ratio + args.rthreshold / 2:
        middle = img0
    elif args.ratio >= img1_ratio - args.rthreshold / 2:
        middle = img1
    else:
        tmp_img0 = img0
        tmp_img1 = img1
        for inference_cycle in range(args.rmaxcycles):
            # Each inference produces the temporal midpoint of the current pair.
            middle = model.inference(tmp_img0, tmp_img1)
            middle_ratio = ( img0_ratio + img1_ratio ) / 2
            if args.ratio - (args.rthreshold / 2) <= middle_ratio <= args.ratio + (args.rthreshold / 2):
                break
            # Narrow the bisection interval toward the target ratio.
            if args.ratio > middle_ratio:
                tmp_img0 = middle
                img0_ratio = middle_ratio
            else:
                tmp_img1 = middle
                img1_ratio = middle_ratio
    img_list.append(middle)
    img_list.append(img1)
else:
    # Recursive doubling: each pass inserts a midpoint between every adjacent
    # pair, producing 2**exp + 1 frames in total.
    img_list = [img0, img1]
    for i in range(args.exp):
        tmp = []
        for j in range(len(img_list) - 1):
            mid = model.inference(img_list[j], img_list[j + 1])
            tmp.append(img_list[j])
            tmp.append(mid)
        tmp.append(img1)
        img_list = tmp
# Write all frames, cropping back to the original h x w.
if not os.path.exists('output'):
    os.mkdir('output')
for i in range(len(img_list)):
    if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
        cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
    else:
        cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).numpy().transpose(1, 2, 0)[:h, :w])
|
[
"megengine.functional.nn.pad"
] |
[((110, 143), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (133, 143), False, 'import warnings\n'), ((154, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Interpolation for a pair of images"""'}), "(description='Interpolation for a pair of images')\n", (177, 227), False, 'import argparse\n'), ((883, 890), 'model.RIFE.Model', 'Model', ([], {}), '()\n', (888, 890), False, 'from model.RIFE import Model\n'), ((1721, 1744), 'megengine.functional.nn.pad', 'F.nn.pad', (['img0', 'padding'], {}), '(img0, padding)\n', (1729, 1744), True, 'import megengine.functional as F\n'), ((1752, 1775), 'megengine.functional.nn.pad', 'F.nn.pad', (['img1', 'padding'], {}), '(img1, padding)\n', (1760, 1775), True, 'import megengine.functional as F\n'), ((1040, 1103), 'cv2.imread', 'cv2.imread', (['args.img[0]', '(cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)\n', (1050, 1103), False, 'import cv2\n'), ((1115, 1178), 'cv2.imread', 'cv2.imread', (['args.img[1]', '(cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)\n', (1125, 1178), False, 'import cv2\n'), ((1327, 1372), 'cv2.imread', 'cv2.imread', (['args.img[0]', 'cv2.IMREAD_UNCHANGED'], {}), '(args.img[0], cv2.IMREAD_UNCHANGED)\n', (1337, 1372), False, 'import cv2\n'), ((1384, 1429), 'cv2.imread', 'cv2.imread', (['args.img[1]', 'cv2.IMREAD_UNCHANGED'], {}), '(args.img[1], cv2.IMREAD_UNCHANGED)\n', (1394, 1429), False, 'import cv2\n'), ((2943, 2967), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (2957, 2967), False, 'import os\n'), ((2973, 2991), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (2981, 2991), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
import math
import official.vision.classification.resnet.model as resnet
import numpy as np
class ResnetBody(M.Module):
    """The four residual stages of a ResNet, returning every stage's feature
    map (no stem, no classifier head).

    Args:
        block: residual block class taken from the reference ResNet module
            (must expose ``block.expansion``).
        init_channel: channel count entering the first stage.
        layers: number of blocks per stage (length 4).
        channels: base width per stage (length 4).
        zero_init_residual: accepted for API parity with the reference
            ResNet; NOTE(review): it is never read in this class.
        norm: normalization layer class. Default: ``M.BatchNorm2d``.
    """
    def __init__(
        self,
        block,
        init_channel,
        layers,
        channels,
        zero_init_residual=False,
        norm=M.BatchNorm2d,
    ):
        super(ResnetBody, self).__init__()
        self.in_channels = init_channel
        # Stage 1 keeps resolution; stages 2-4 downsample by 2 each.
        self.layer1 = self._make_layer(
            block, channels[0], layers[0], stride=1, norm=norm
        )
        self.layer2 = self._make_layer(
            block, channels[1], layers[1], stride=2, norm=norm
        )
        self.layer3 = self._make_layer(
            block, channels[2], layers[2], stride=2, norm=norm,
        )
        self.layer4 = self._make_layer(
            block, channels[3], layers[3], stride=2, norm=norm,
        )
        # Weight init mirroring the reference ResNet: MSRA (Kaiming) for
        # convs/linears, ones/zeros for batch-norm affine parameters.
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    M.init.uniform_(m.bias, -bound, bound)
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
            elif isinstance(m, M.Linear):
                M.init.msra_uniform_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = M.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    M.init.uniform_(m.bias, -bound, bound)
    def _make_layer(self, block, channels, blocks, stride=1, norm=M.BatchNorm2d):
        """Build one residual stage: the first block may change stride and
        width; the remaining ``blocks - 1`` keep the expanded width."""
        layers = []
        layers.append(block(self.in_channels, channels, stride, norm=norm))
        self.in_channels = channels * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.in_channels, channels, norm=norm))
        return M.Sequential(*layers)
    def forward(self, x):
        """Return the feature maps of all four stages, shallowest first."""
        outputs = []
        x = self.layer1(x)
        outputs.append(x)
        x = self.layer2(x)
        outputs.append(x)
        x = self.layer3(x)
        outputs.append(x)
        x = self.layer4(x)
        outputs.append(x)
        return outputs
class SingleStage(M.Module):
    """One MSPN stage: a ResNet down-branch followed by a top-down
    upsampling branch with 1x1 lateral connections (U-shape), producing
    four ``mid_channel``-wide feature maps from coarsest to finest.

    Args:
        block: residual block class (must expose ``block.expansion``).
        init_channel: channel count entering the down-branch.
        layers: blocks per residual stage (length 4).
        channels: base widths of the four residual stages (length 4).
        mid_channel: uniform channel width of the up-branch outputs.
        norm: normalization layer class. Default: ``M.BatchNorm2d``.
    """
    def __init__(
        self, block, init_channel, layers, channels, mid_channel, norm=M.BatchNorm2d
    ):
        super(SingleStage, self).__init__()
        # BUGFIX: pass `norm` by keyword.  Positionally it bound to
        # ResnetBody's `zero_init_residual` parameter, so the requested norm
        # never reached the body and it silently used its default.  Behavior
        # is unchanged for current callers (they pass the default norm).
        self.down = ResnetBody(block, init_channel, layers, channels, norm=norm)
        # Lateral 1x1 projections (up1..up4) and stride-2 deconv upsamplers
        # (deconv1..deconv3).  Attribute names are kept exactly as before so
        # existing checkpoints continue to load.
        channel = block.expansion * channels[-1]
        self.up1 = M.Sequential(
            M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
        )
        self.deconv1 = M.Sequential(
            M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
        )
        channel = block.expansion * channels[-2]
        self.up2 = M.Sequential(
            M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
        )
        self.deconv2 = M.Sequential(
            M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
        )
        channel = block.expansion * channels[-3]
        self.up3 = M.Sequential(
            M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
        )
        self.deconv3 = M.Sequential(
            M.ConvTranspose2d(mid_channel, mid_channel, 4, 2, 1), norm(mid_channel)
        )
        channel = block.expansion * channels[-4]
        self.up4 = M.Sequential(
            M.Conv2d(channel, mid_channel, 1, 1, 0), norm(mid_channel)
        )
    def forward(self, x):
        """Return four merged feature maps, coarsest resolution first."""
        branches = self.down(x)
        branches = list(reversed(branches))  # deepest/coarsest feature first
        outputs = []
        f_up = F.relu(self.up1(branches[0]))
        outputs.append(f_up)
        # Each step: upsample the running feature and add the lateral branch.
        f = self.up2(branches[1])
        f_up = F.relu(self.deconv1(f_up) + f)
        outputs.append(f_up)
        f = self.up3(branches[2])
        f_up = F.relu(self.deconv2(f_up) + f)
        outputs.append(f_up)
        f = self.up4(branches[3])
        f_up = F.relu(self.deconv3(f_up) + f)
        outputs.append(f_up)
        return outputs
class MSPN(M.Module):
    """Multi-Stage Pose Network: a stem head followed by ``nr_stg`` cascaded
    U-shaped stages, each emitting heatmaps at four scales.

    Args:
        block: name of a residual block class looked up in the reference
            resnet module (e.g. ``"Bottleneck"``).
        layers: blocks per residual stage (length 4).
        channels: base widths of the residual stages (length 4).
        mid_channel: channel width of each stage's up-branch.
        keypoint_num: number of predicted keypoint heatmaps.
        nr_stg: number of cascaded stages.
    """
    def __init__(self, block, layers, channels, mid_channel, keypoint_num, nr_stg):
        super(MSPN, self).__init__()
        block = getattr(resnet, block)
        norm = M.BatchNorm2d
        self.nr_stg = nr_stg
        self.keypoint_num = keypoint_num
        # Stem: two stride-2 convs => 4x spatial downsampling before stage 1.
        self.head = M.Sequential(
            M.Conv2d(3, 64, 3, 2, 1),
            norm(64),
            M.ReLU(),
            M.Conv2d(64, 64, 3, 1, 1),
            norm(64),
            M.ReLU(),
            M.Conv2d(64, 64, 3, 2, 1),
            norm(64),
            M.ReLU(),
        )
        # NOTE(review): sub-modules are stored in a plain dict; this relies on
        # MegEngine discovering parameters inside dict attributes -- confirm
        # for the pinned MegEngine version.
        self.stages = {}
        for i in range(nr_stg):
            init_channel = 64
            self.stages["Stage_{}_body".format(i)] = SingleStage(
                block, init_channel, layers, channels, mid_channel, norm
            )
            # One 3x3 prediction head per scale (4 scales per stage).
            tail = {}
            for j in range(4):
                tail["tail_{}".format(j)] = M.Conv2d(mid_channel, keypoint_num, 3, 1, 1)
            self.stages["Stage_{}_tail".format(i)] = tail
            # Transition feeding the next stage (absent for the last stage).
            if i < nr_stg - 1:
                self.stages["Stage_{}_next".format(i)] = M.Sequential(
                    M.Conv2d(mid_channel, 64, 1, 1, 0), norm(64), M.ReLU()
                )
        # Placeholder tensors filled by the training/eval driver before
        # calc_loss()/predict() are called.
        self.inputs = {
            "image": mge.tensor(dtype="float32"),
            "heatmap": mge.tensor(dtype="float32"),
            "heat_valid": mge.tensor(dtype="float32"),
        }
    def calc_loss(self):
        """L2 loss over all stages/scales, plus online hard-keypoint mining
        (OHKM) on each stage's largest heatmap."""
        outs = self.forward(self.inputs["image"])
        loss = 0
        for stage_out in outs:
            for ind, scale_out in enumerate(stage_out[:-1]):
                # Mask by visibility; the > 1.1 threshold presumably selects
                # fully-visible keypoints only -- verify against the dataset's
                # heat_valid convention.
                label = (
                    self.inputs["heatmap"][:, ind]
                    * (self.inputs["heat_valid"] > 1.1)[:, :, None, None]
                )
                tmp = F.square_loss(scale_out, label)
                loss += tmp / 4 / len(outs)
            # OHKM loss for the largest heatmap
            tmp = ((stage_out[-1] - self.inputs["heatmap"][:, -1]) ** 2).mean(3).mean(
                2
            ) * (self.inputs["heat_valid"] > 0.1)
            ohkm_loss = 0
            # Per sample, average only the hardest half of the keypoints.
            for i in range(tmp.shape[0]):
                selected_loss, _ = F.top_k(
                    tmp[i], self.keypoint_num // 2, descending=True
                )
                ohkm_loss += selected_loss.mean()
            ohkm_loss /= tmp.shape[0]
            loss += ohkm_loss
        return loss
    def predict(self):
        """Return the finest heatmap of the last stage for the stored image."""
        outputs = self.forward(self.inputs["image"])
        pred = outputs[-1][-1]
        return pred
    def forward(self, x):
        """Run all stages; returns a list (per stage) of lists (per scale)
        of heatmaps, all upsampled to a common resolution."""
        f = self.head(x)
        outputs = []
        for i in range(self.nr_stg):
            multi_scale_features = self.stages["Stage_{}_body".format(i)](f)
            multi_scale_heatmaps = []
            for j in range(4):
                out = self.stages["Stage_{}_tail".format(i)]["tail_{}".format(j)](
                    multi_scale_features[j]
                )
                # Upsample coarser scales so every head output matches the
                # finest scale (factor 8, 4, 2, 1 for j = 0..3).
                out = F.interpolate(out, scale_factor=2 ** (3 - j))
                multi_scale_heatmaps.append(out)
            if i < self.nr_stg - 1:
                f = self.stages["Stage_{}_next".format(i)](multi_scale_features[-1])
            outputs.append(multi_scale_heatmaps)
        return outputs
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl"
)
def mspn_4stage(**kwargs):
    """Build the 4-stage MSPN with its published hyper-parameters.

    Extra keyword arguments are forwarded verbatim to :class:`MSPN`;
    the decorator wires up the pretrained-weight download.
    """
    return MSPN(
        block="Bottleneck",
        layers=[5, 5, 6, 3],
        channels=[64, 128, 192, 384],
        nr_stg=4,
        mid_channel=256,
        keypoint_num=17,
        **kwargs
    )
|
[
"megengine.module.ReLU",
"megengine.tensor",
"megengine.functional.top_k",
"megengine.module.init.zeros_",
"megengine.module.init.msra_normal_",
"megengine.module.init.calculate_fan_in_and_fan_out",
"megengine.module.ConvTranspose2d",
"megengine.module.Sequential",
"megengine.module.Conv2d",
"megengine.functional.square_loss",
"megengine.functional.interpolate",
"megengine.hub.pretrained",
"megengine.module.init.uniform_",
"megengine.module.init.ones_"
] |
[((7936, 8043), 'megengine.hub.pretrained', 'hub.pretrained', (['"""https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl"""'], {}), "(\n 'https://data.megengine.org.cn/models/weights/mspn_4stage_256x192_0_255_75_2.pkl'\n )\n", (7950, 8043), True, 'import megengine.hub as hub\n'), ((2520, 2541), 'megengine.module.Sequential', 'M.Sequential', (['*layers'], {}), '(*layers)\n', (2532, 2541), True, 'import megengine.module as M\n'), ((3182, 3221), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3190, 3221), True, 'import megengine.module as M\n'), ((3300, 3352), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3317, 3352), True, 'import megengine.module as M\n'), ((3477, 3516), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3485, 3516), True, 'import megengine.module as M\n'), ((3595, 3647), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3612, 3647), True, 'import megengine.module as M\n'), ((3772, 3811), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (3780, 3811), True, 'import megengine.module as M\n'), ((3890, 3942), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['mid_channel', 'mid_channel', '(4)', '(2)', '(1)'], {}), '(mid_channel, mid_channel, 4, 2, 1)\n', (3907, 3942), True, 'import megengine.module as M\n'), ((4067, 4106), 'megengine.module.Conv2d', 'M.Conv2d', (['channel', 'mid_channel', '(1)', '(1)', '(0)'], {}), '(channel, mid_channel, 1, 1, 0)\n', (4075, 4106), True, 'import megengine.module as M\n'), ((5022, 5046), 'megengine.module.Conv2d', 'M.Conv2d', 
(['(3)', '(64)', '(3)', '(2)', '(1)'], {}), '(3, 64, 3, 2, 1)\n', (5030, 5046), True, 'import megengine.module as M\n'), ((5082, 5090), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5088, 5090), True, 'import megengine.module as M\n'), ((5104, 5129), 'megengine.module.Conv2d', 'M.Conv2d', (['(64)', '(64)', '(3)', '(1)', '(1)'], {}), '(64, 64, 3, 1, 1)\n', (5112, 5129), True, 'import megengine.module as M\n'), ((5165, 5173), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5171, 5173), True, 'import megengine.module as M\n'), ((5187, 5212), 'megengine.module.Conv2d', 'M.Conv2d', (['(64)', '(64)', '(3)', '(2)', '(1)'], {}), '(64, 64, 3, 2, 1)\n', (5195, 5212), True, 'import megengine.module as M\n'), ((5248, 5256), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5254, 5256), True, 'import megengine.module as M\n'), ((5951, 5978), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (5961, 5978), True, 'import megengine as mge\n'), ((6003, 6030), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (6013, 6030), True, 'import megengine as mge\n'), ((6058, 6085), 'megengine.tensor', 'mge.tensor', ([], {'dtype': '"""float32"""'}), "(dtype='float32')\n", (6068, 6085), True, 'import megengine as mge\n'), ((1416, 1482), 'megengine.module.init.msra_normal_', 'M.init.msra_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (1435, 1482), True, 'import megengine.module as M\n'), ((5606, 5650), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channel', 'keypoint_num', '(3)', '(1)', '(1)'], {}), '(mid_channel, keypoint_num, 3, 1, 1)\n', (5614, 5650), True, 'import megengine.module as M\n'), ((6474, 6505), 'megengine.functional.square_loss', 'F.square_loss', (['scale_out', 'label'], {}), '(scale_out, label)\n', (6487, 6505), True, 'import megengine.functional as F\n'), ((6857, 6913), 
'megengine.functional.top_k', 'F.top_k', (['tmp[i]', '(self.keypoint_num // 2)'], {'descending': '(True)'}), '(tmp[i], self.keypoint_num // 2, descending=True)\n', (6864, 6913), True, 'import megengine.functional as F\n'), ((7643, 7688), 'megengine.functional.interpolate', 'F.interpolate', (['out'], {'scale_factor': '(2 ** (3 - j))'}), '(out, scale_factor=2 ** (3 - j))\n', (7656, 7688), True, 'import megengine.functional as F\n'), ((1554, 1599), 'megengine.module.init.calculate_fan_in_and_fan_out', 'M.init.calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (1589, 1599), True, 'import megengine.module as M\n'), ((1670, 1708), 'megengine.module.init.uniform_', 'M.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (1685, 1708), True, 'import megengine.module as M\n'), ((1772, 1794), 'megengine.module.init.ones_', 'M.init.ones_', (['m.weight'], {}), '(m.weight)\n', (1784, 1794), True, 'import megengine.module as M\n'), ((1811, 1832), 'megengine.module.init.zeros_', 'M.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (1824, 1832), True, 'import megengine.module as M\n'), ((5832, 5866), 'megengine.module.Conv2d', 'M.Conv2d', (['mid_channel', '(64)', '(1)', '(1)', '(0)'], {}), '(mid_channel, 64, 1, 1, 0)\n', (5840, 5866), True, 'import megengine.module as M\n'), ((5878, 5886), 'megengine.module.ReLU', 'M.ReLU', ([], {}), '()\n', (5884, 5886), True, 'import megengine.module as M\n'), ((1632, 1649), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (1641, 1649), False, 'import math\n'), ((2009, 2054), 'megengine.module.init.calculate_fan_in_and_fan_out', 'M.init.calculate_fan_in_and_fan_out', (['m.weight'], {}), '(m.weight)\n', (2044, 2054), True, 'import megengine.module as M\n'), ((2125, 2163), 'megengine.module.init.uniform_', 'M.init.uniform_', (['m.bias', '(-bound)', 'bound'], {}), '(m.bias, -bound, bound)\n', (2140, 2163), True, 'import megengine.module as M\n'), ((1924, 1936), 'math.sqrt', 'math.sqrt', (['(5)'], 
{}), '(5)\n', (1933, 1936), False, 'import math\n'), ((2087, 2104), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (2096, 2104), False, 'import math\n')]
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from megengine.autodiff import GradManager
from megengine.distributed.helper import get_device_count_by_fork
from megengine.jit import trace
def test_basic():
    """Gradients computed via explicit record()/release() and via the
    context-manager protocol must agree."""
    x = mge.tensor([1.0, 3.0, 5.0]).reshape(1, 3)
    w = mge.tensor([2.0, 4.0, 6.0]).reshape(3, 1)
    b = mge.tensor(-1.0)
    gm = GradManager().attach([w, b])

    # Explicit record/backward/release cycle.
    gm.record()
    out = F.matmul(x, w) + b
    gm.backward(out)
    gm.release()  # is not necessary
    np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
    np.testing.assert_equal(b.grad.numpy(), [1])

    w.grad = None
    b.grad = None

    # Same computation, driven through `with gm:`.
    with gm:
        out = F.matmul(x, w) + b
        gm.backward(out)
    np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
    np.testing.assert_equal(b.grad.numpy(), [1])
def test_attach_in_with_block():
    """A tensor can be attached after recording has already started."""
    base = mge.Parameter([1.0])
    gm = GradManager()
    with gm:
        mid = base * 3
        # Attach inside the active recording block.
        gm.attach(mid)
        out = mid + 1
        gm.backward(out)
    assert int(mid.grad.numpy()) == 1
def test_attach_temporary():
    """Temporarily attached tensors are released after backward: the
    callback fires with the attached tensor, and the tensor is collectable
    once the recording block exits."""
    w = mge.Parameter(2.0)
    gm = GradManager()
    gm.attach(w)
    def cb(x, g):
        # The callback must receive exactly the tensor that was attached.
        assert x is ref()
        cb.called = True
    for i in range(3):
        with gm:
            cb.called = False
            x = mge.Tensor(i, dtype="float32")
            gm.attach(x, callbacks=cb)
            ref = weakref.ref(x)
            y = x * w
            gm.backward(y)
            assert cb.called
        del x
        # Nothing (manager included) may keep `x` alive after the block.
        assert ref() is None
    # NOTE: does not guarantee timely release when recording
    # for i in range(3):
    #     with gm:
    #         x = mge.Tensor(i, dtype='float32')
    #         gm.attach(x)
    #         ref = weakref.ref(x)
    #         y = x * w
    #         del x
    #         assert ref() is None
    #         gm.backward(y)
def test_no_dependency():
    """Only parameters on the path to the backward target get a gradient."""
    data = mge.tensor(3)
    used = mge.Parameter(1.0)
    unused = mge.Parameter(1.0)
    gm = GradManager()
    gm.attach(used)
    gm.attach(unused)
    with gm:
        prod = data * used
        side = unused * prod  # computed, but never fed to backward()
        gm.backward(prod.sum())
    assert used.grad is not None
    assert unused.grad is None
def test_regression_1762():
    """Regression test (issue #1762): backward must succeed when a conv
    output feeds batch_norm AND a later unrelated op -- the statement order
    below is the point of the test, do not reorder."""
    x = F.ones((10, 10, 3, 3))
    conv = M.Conv2d(10, 10, kernel_size=3, padding=1)
    t_shape = (1, 10, 1, 1)
    weight = mge.Parameter(np.ones(t_shape, dtype=np.float32))
    bias = mge.Parameter(np.zeros(t_shape, dtype=np.float32))
    gm = GradManager()
    gm.attach(list(conv.parameters()) + [weight, bias])
    with gm:
        out1 = conv(x)
        out2 = F.batch_norm(out1, None, None, weight, bias, training=True,)
        # Weird error only occur when this action is placed after BN
        # Op type is not relevant
        loss = out1 + 1
        gm.backward(loss)
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_remote_grad():
    """Pipeline-style gradient flow across ranks via remote_send/remote_recv,
    run eagerly and under trace(symbolic=False/True)."""
    @dist.launcher
    def worker():
        rank = dist.get_rank()
        size = dist.get_world_size()
        # Each rank owns one Linear; activations flow rank -> rank + 1.
        x = mge.tensor(np.random.randn(1, rank * 2 + 2), dtype=np.float32)
        m = M.Linear(rank * 2 + 2, rank * 2 + 4)
        gm = GradManager().attach(m.parameters())
        opt = optim.SGD(m.parameters(), 1e-3, momentum=0.9)
        def train_func(x):
            with gm:
                if rank != 0:
                    # Non-first ranks receive their input from the previous rank.
                    x = dist.functional.remote_recv(
                        rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32
                    )
                y = m(x)
                if rank != size - 1:
                    dist.functional.remote_send(y, dest_rank=rank + 1)
                    # backward() with no target: the gradient presumably comes
                    # back from the downstream rank -- see remote_send semantics.
                    gm.backward()
                else:
                    y = y.mean()
                    gm.backward(y)
                opt.step().clear_grad()
        train_funcs = [
            train_func,
            trace(symbolic=False)(train_func),
            trace(symbolic=True)(train_func),
        ]
        for func in train_funcs:
            for i in range(3):
                func(x)
    worker()
|
[
"megengine.distributed.functional.remote_recv",
"megengine.functional.ones",
"megengine.module.Conv2d",
"megengine.distributed.get_rank",
"megengine.functional.batch_norm",
"megengine.distributed.get_world_size",
"megengine.Parameter",
"megengine.jit.trace",
"megengine.functional.matmul",
"megengine.tensor",
"megengine.distributed.functional.remote_send",
"megengine.distributed.helper.get_device_count_by_fork",
"megengine.module.Linear",
"megengine.autodiff.GradManager",
"megengine.Tensor"
] |
[((847, 863), 'megengine.tensor', 'mge.tensor', (['(-1.0)'], {}), '(-1.0)\n', (857, 863), True, 'import megengine as mge\n'), ((928, 942), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', (936, 942), True, 'import megengine.functional as F\n'), ((1396, 1416), 'megengine.Parameter', 'mge.Parameter', (['[1.0]'], {}), '([1.0])\n', (1409, 1416), True, 'import megengine as mge\n'), ((1426, 1439), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1437, 1439), False, 'from megengine.autodiff import GradManager\n'), ((1608, 1626), 'megengine.Parameter', 'mge.Parameter', (['(2.0)'], {}), '(2.0)\n', (1621, 1626), True, 'import megengine as mge\n'), ((1636, 1649), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (1647, 1649), False, 'from megengine.autodiff import GradManager\n'), ((2409, 2422), 'megengine.tensor', 'mge.tensor', (['(3)'], {}), '(3)\n', (2419, 2422), True, 'import megengine as mge\n'), ((2432, 2450), 'megengine.Parameter', 'mge.Parameter', (['(1.0)'], {}), '(1.0)\n', (2445, 2450), True, 'import megengine as mge\n'), ((2466, 2484), 'megengine.Parameter', 'mge.Parameter', (['(1.0)'], {}), '(1.0)\n', (2479, 2484), True, 'import megengine as mge\n'), ((2494, 2507), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (2505, 2507), False, 'from megengine.autodiff import GradManager\n'), ((2749, 2771), 'megengine.functional.ones', 'F.ones', (['(10, 10, 3, 3)'], {}), '((10, 10, 3, 3))\n', (2755, 2771), True, 'import megengine.functional as F\n'), ((2784, 2826), 'megengine.module.Conv2d', 'M.Conv2d', (['(10)', '(10)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(10, 10, kernel_size=3, padding=1)\n', (2792, 2826), True, 'import megengine.module as M\n'), ((2991, 3004), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (3002, 3004), False, 'from megengine.autodiff import GradManager\n'), ((1186, 1200), 'megengine.functional.matmul', 'F.matmul', (['x', 'w'], {}), '(x, w)\n', 
(1194, 1200), True, 'import megengine.functional as F\n'), ((2883, 2917), 'numpy.ones', 'np.ones', (['t_shape'], {'dtype': 'np.float32'}), '(t_shape, dtype=np.float32)\n', (2890, 2917), True, 'import numpy as np\n'), ((2944, 2979), 'numpy.zeros', 'np.zeros', (['t_shape'], {'dtype': 'np.float32'}), '(t_shape, dtype=np.float32)\n', (2952, 2979), True, 'import numpy as np\n'), ((3114, 3173), 'megengine.functional.batch_norm', 'F.batch_norm', (['out1', 'None', 'None', 'weight', 'bias'], {'training': '(True)'}), '(out1, None, None, weight, bias, training=True)\n', (3126, 3173), True, 'import megengine.functional as F\n'), ((3731, 3746), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3744, 3746), True, 'import megengine.distributed as dist\n'), ((3762, 3783), 'megengine.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3781, 3783), True, 'import megengine.distributed as dist\n'), ((3871, 3907), 'megengine.module.Linear', 'M.Linear', (['(rank * 2 + 2)', '(rank * 2 + 4)'], {}), '(rank * 2 + 2, rank * 2 + 4)\n', (3879, 3907), True, 'import megengine.module as M\n'), ((3356, 3373), 'platform.system', 'platform.system', ([], {}), '()\n', (3371, 3373), False, 'import platform\n'), ((3456, 3473), 'platform.system', 'platform.system', ([], {}), '()\n', (3471, 3473), False, 'import platform\n'), ((3553, 3584), 'megengine.distributed.helper.get_device_count_by_fork', 'get_device_count_by_fork', (['"""gpu"""'], {}), "('gpu')\n", (3577, 3584), False, 'from megengine.distributed.helper import get_device_count_by_fork\n'), ((747, 774), 'megengine.tensor', 'mge.tensor', (['[1.0, 3.0, 5.0]'], {}), '([1.0, 3.0, 5.0])\n', (757, 774), True, 'import megengine as mge\n'), ((797, 824), 'megengine.tensor', 'mge.tensor', (['[2.0, 4.0, 6.0]'], {}), '([2.0, 4.0, 6.0])\n', (807, 824), True, 'import megengine as mge\n'), ((874, 887), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (885, 887), False, 'from megengine.autodiff import 
GradManager\n'), ((1824, 1854), 'megengine.Tensor', 'mge.Tensor', (['i'], {'dtype': '"""float32"""'}), "(i, dtype='float32')\n", (1834, 1854), True, 'import megengine as mge\n'), ((1912, 1926), 'weakref.ref', 'weakref.ref', (['x'], {}), '(x)\n', (1923, 1926), False, 'import weakref\n'), ((3807, 3839), 'numpy.random.randn', 'np.random.randn', (['(1)', '(rank * 2 + 2)'], {}), '(1, rank * 2 + 2)\n', (3822, 3839), True, 'import numpy as np\n'), ((3921, 3934), 'megengine.autodiff.GradManager', 'GradManager', ([], {}), '()\n', (3932, 3934), False, 'from megengine.autodiff import GradManager\n'), ((4606, 4627), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(False)'}), '(symbolic=False)\n', (4611, 4627), False, 'from megengine.jit import trace\n'), ((4653, 4673), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (4658, 4673), False, 'from megengine.jit import trace\n'), ((4121, 4206), 'megengine.distributed.functional.remote_recv', 'dist.functional.remote_recv', (['(rank - 1)'], {'shape': '(1, rank * 2 + 2)', 'dtype': 'np.float32'}), '(rank - 1, shape=(1, rank * 2 + 2), dtype=np.float32\n )\n', (4148, 4206), True, 'import megengine.distributed as dist\n'), ((4330, 4380), 'megengine.distributed.functional.remote_send', 'dist.functional.remote_send', (['y'], {'dest_rank': '(rank + 1)'}), '(y, dest_rank=rank + 1)\n', (4357, 4380), True, 'import megengine.distributed as dist\n')]
|
import numpy as np
import megengine as mge
import megengine.module as M
import megengine.functional as F
from .utils.utils import bilinear_sampler, coords_grid
class AGCL:
    """
    Implementation of Adaptive Group Correlation Layer (AGCL).

    Correlates two feature maps (left/right stereo views) in 4 channel
    groups, optionally after a cross-attention step and with learned
    per-position sampling offsets.
    """
    def __init__(self, fmap1, fmap2, att=None):
        # fmap1/fmap2: feature maps of shape [N, C, H, W]; att: optional
        # cross-attention module applied before offset correlation.
        self.fmap1 = fmap1
        self.fmap2 = fmap2
        self.att = att
        # Base pixel-coordinate grid, kept on the same device as fmap1.
        self.coords = coords_grid(fmap1.shape[0], fmap1.shape[2], fmap1.shape[3]).to(
            fmap1.device
        )
    def __call__(self, flow, extra_offset, small_patch=False, iter_mode=False):
        # iter_mode: fixed-window correlation (extra_offset unused there);
        # otherwise attention + learned offsets are applied.
        if iter_mode:
            corr = self.corr_iter(self.fmap1, self.fmap2, flow, small_patch)
        else:
            corr = self.corr_att_offset(
                self.fmap1, self.fmap2, flow, extra_offset, small_patch
            )
        return corr
    def get_correlation(self, left_feature, right_feature, psize=(3, 3), dilate=(1, 1)):
        """Mean channel correlation between `left_feature` and every shifted
        copy of `right_feature` within a (dilated) psize window."""
        N, C, H, W = left_feature.shape
        di_y, di_x = dilate[0], dilate[1]
        pady, padx = psize[0] // 2 * di_y, psize[1] // 2 * di_x
        # NOTE(review): `pad_witdth` matches this MegEngine version's keyword
        # spelling -- confirm against the installed F.pad signature before
        # "fixing" it.
        right_pad = F.pad(right_feature, pad_witdth=(
            (0, 0), (0, 0), (pady, pady), (padx, padx)), mode="replicate")
        # Extract every psize-window shift as a separate [N, C, H, W] slice.
        right_slid = F.sliding_window(
            right_pad, kernel_size=(H, W), stride=(di_y, di_x))
        right_slid = right_slid.reshape(N, C, -1, H, W)
        right_slid = F.transpose(right_slid, (0, 2, 1, 3, 4))
        right_slid = right_slid.reshape(-1, C, H, W)
        # Correlation = mean over channels of the elementwise product.
        corr_mean = F.mean(left_feature * right_slid, axis=1, keepdims=True)
        corr_final = corr_mean.reshape(1, -1, H, W)
        return corr_final
    def corr_iter(self, left_feature, right_feature, flow, small_patch):
        """Group-wise window correlation after warping `right_feature` by
        `flow`; 3x3 windows if `small_patch`, else 1x9."""
        # Warp the right features to the flow-displaced coordinates.
        coords = self.coords + flow
        coords = F.transpose(coords, (0, 2, 3, 1))
        right_feature = bilinear_sampler(right_feature, coords)
        if small_patch:
            psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
            dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
        else:
            psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
            dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
        # Correlate in 4 independent channel groups.
        N, C, H, W = left_feature.shape
        lefts = F.split(left_feature, 4, axis=1)
        rights = F.split(right_feature, 4, axis=1)
        corrs = []
        for i in range(len(psize_list)):
            corr = self.get_correlation(
                lefts[i], rights[i], psize_list[i], dilate_list[i]
            )
            corrs.append(corr)
        final_corr = F.concat(corrs, axis=1)
        return final_corr
    def corr_att_offset(
        self, left_feature, right_feature, flow, extra_offset, small_patch
    ):
        """Group-wise correlation with optional cross-attention and learned
        per-position offsets added to the fixed sampling window."""
        N, C, H, W = left_feature.shape
        if self.att is not None:
            # Attention operates on token sequences, so flatten the spatial
            # dims before and restore NCHW after.
            left_feature = F.reshape(
                F.transpose(left_feature, (0, 2, 3, 1)), (N, H * W, C)
            )  # 'n c h w -> n (h w) c'
            right_feature = F.reshape(
                F.transpose(right_feature, (0, 2, 3, 1)), (N, H * W, C)
            )  # 'n c h w -> n (h w) c'
            left_feature, right_feature = self.att(left_feature, right_feature)
            # 'n (h w) c -> n c h w'
            left_feature, right_feature = [
                F.transpose(F.reshape(x, (N, H, W, C)), (0, 3, 1, 2))
                for x in [left_feature, right_feature]
            ]
        lefts = F.split(left_feature, 4, axis=1)
        rights = F.split(right_feature, 4, axis=1)
        C = C // 4  # per-group channel count from here on
        if small_patch:
            psize_list = [(3, 3), (3, 3), (3, 3), (3, 3)]
            dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
        else:
            psize_list = [(1, 9), (1, 9), (1, 9), (1, 9)]
            dilate_list = [(1, 1), (1, 1), (1, 1), (1, 1)]
        # 9 sampling positions per pixel (3x3 or 1x9 window).
        search_num = 9
        extra_offset = F.transpose(
            F.reshape(extra_offset, (N, search_num, 2, H, W)), (0, 1, 3, 4, 2)
        )  # [N, search_num, 1, 1, 2]
        corrs = []
        for i in range(len(psize_list)):
            left_feature, right_feature = lefts[i], rights[i]
            psize, dilate = psize_list[i], dilate_list[i]
            psizey, psizex = psize[0], psize[1]
            dilatey, dilatex = dilate[0], dilate[1]
            # Fixed window offsets around the center (radius ry/rx).
            ry = psizey // 2 * dilatey
            rx = psizex // 2 * dilatex
            x_grid, y_grid = np.meshgrid(
                np.arange(-rx, rx + 1, dilatex), np.arange(-ry, ry + 1, dilatey)
            )
            y_grid, x_grid = mge.tensor(y_grid, device=self.fmap1.device), mge.tensor(
                x_grid, device=self.fmap1.device
            )
            offsets = F.transpose(
                F.reshape(F.stack((x_grid, y_grid)), (2, -1)), (1, 0)
            )  # [search_num, 2]
            offsets = F.expand_dims(offsets, (0, 2, 3))
            # Learned offsets shift each fixed window position per pixel.
            offsets = offsets + extra_offset
            coords = self.coords + flow  # [N, 2, H, W]
            coords = F.transpose(coords, (0, 2, 3, 1))  # [N, H, W, 2]
            coords = F.expand_dims(coords, 1) + offsets
            coords = F.reshape(coords, (N, -1, W, 2))  # [N, search_num*H, W, 2]
            right_feature = bilinear_sampler(
                right_feature, coords
            )  # [N, C, search_num*H, W]
            right_feature = F.reshape(
                right_feature, (N, C, -1, H, W)
            )  # [N, C, search_num, H, W]
            # Mean over channels => one correlation map per window position.
            left_feature = F.expand_dims(left_feature, 2)
            corr = F.mean(left_feature * right_feature, axis=1)
            corrs.append(corr)
        final_corr = F.concat(corrs, axis=1)
        return final_corr
|
[
"megengine.functional.split",
"megengine.functional.pad",
"megengine.tensor",
"megengine.functional.expand_dims",
"megengine.functional.transpose",
"megengine.functional.mean",
"megengine.functional.concat",
"megengine.functional.reshape",
"megengine.functional.sliding_window",
"megengine.functional.stack"
] |
[((1102, 1202), 'megengine.functional.pad', 'F.pad', (['right_feature'], {'pad_witdth': '((0, 0), (0, 0), (pady, pady), (padx, padx))', 'mode': '"""replicate"""'}), "(right_feature, pad_witdth=((0, 0), (0, 0), (pady, pady), (padx, padx)\n ), mode='replicate')\n", (1107, 1202), True, 'import megengine.functional as F\n'), ((1233, 1301), 'megengine.functional.sliding_window', 'F.sliding_window', (['right_pad'], {'kernel_size': '(H, W)', 'stride': '(di_y, di_x)'}), '(right_pad, kernel_size=(H, W), stride=(di_y, di_x))\n', (1249, 1301), True, 'import megengine.functional as F\n'), ((1392, 1432), 'megengine.functional.transpose', 'F.transpose', (['right_slid', '(0, 2, 1, 3, 4)'], {}), '(right_slid, (0, 2, 1, 3, 4))\n', (1403, 1432), True, 'import megengine.functional as F\n'), ((1507, 1563), 'megengine.functional.mean', 'F.mean', (['(left_feature * right_slid)'], {'axis': '(1)', 'keepdims': '(True)'}), '(left_feature * right_slid, axis=1, keepdims=True)\n', (1513, 1563), True, 'import megengine.functional as F\n'), ((1771, 1804), 'megengine.functional.transpose', 'F.transpose', (['coords', '(0, 2, 3, 1)'], {}), '(coords, (0, 2, 3, 1))\n', (1782, 1804), True, 'import megengine.functional as F\n'), ((2199, 2231), 'megengine.functional.split', 'F.split', (['left_feature', '(4)'], {'axis': '(1)'}), '(left_feature, 4, axis=1)\n', (2206, 2231), True, 'import megengine.functional as F\n'), ((2249, 2282), 'megengine.functional.split', 'F.split', (['right_feature', '(4)'], {'axis': '(1)'}), '(right_feature, 4, axis=1)\n', (2256, 2282), True, 'import megengine.functional as F\n'), ((2519, 2542), 'megengine.functional.concat', 'F.concat', (['corrs'], {'axis': '(1)'}), '(corrs, axis=1)\n', (2527, 2542), True, 'import megengine.functional as F\n'), ((3370, 3402), 'megengine.functional.split', 'F.split', (['left_feature', '(4)'], {'axis': '(1)'}), '(left_feature, 4, axis=1)\n', (3377, 3402), True, 'import megengine.functional as F\n'), ((3420, 3453), 'megengine.functional.split', 
'F.split', (['right_feature', '(4)'], {'axis': '(1)'}), '(right_feature, 4, axis=1)\n', (3427, 3453), True, 'import megengine.functional as F\n'), ((5509, 5532), 'megengine.functional.concat', 'F.concat', (['corrs'], {'axis': '(1)'}), '(corrs, axis=1)\n', (5517, 5532), True, 'import megengine.functional as F\n'), ((3819, 3868), 'megengine.functional.reshape', 'F.reshape', (['extra_offset', '(N, search_num, 2, H, W)'], {}), '(extra_offset, (N, search_num, 2, H, W))\n', (3828, 3868), True, 'import megengine.functional as F\n'), ((4733, 4766), 'megengine.functional.expand_dims', 'F.expand_dims', (['offsets', '(0, 2, 3)'], {}), '(offsets, (0, 2, 3))\n', (4746, 4766), True, 'import megengine.functional as F\n'), ((4890, 4923), 'megengine.functional.transpose', 'F.transpose', (['coords', '(0, 2, 3, 1)'], {}), '(coords, (0, 2, 3, 1))\n', (4901, 4923), True, 'import megengine.functional as F\n'), ((5017, 5049), 'megengine.functional.reshape', 'F.reshape', (['coords', '(N, -1, W, 2)'], {}), '(coords, (N, -1, W, 2))\n', (5026, 5049), True, 'import megengine.functional as F\n'), ((5231, 5273), 'megengine.functional.reshape', 'F.reshape', (['right_feature', '(N, C, -1, H, W)'], {}), '(right_feature, (N, C, -1, H, W))\n', (5240, 5273), True, 'import megengine.functional as F\n'), ((5360, 5390), 'megengine.functional.expand_dims', 'F.expand_dims', (['left_feature', '(2)'], {}), '(left_feature, 2)\n', (5373, 5390), True, 'import megengine.functional as F\n'), ((5410, 5454), 'megengine.functional.mean', 'F.mean', (['(left_feature * right_feature)'], {'axis': '(1)'}), '(left_feature * right_feature, axis=1)\n', (5416, 5454), True, 'import megengine.functional as F\n'), ((2807, 2846), 'megengine.functional.transpose', 'F.transpose', (['left_feature', '(0, 2, 3, 1)'], {}), '(left_feature, (0, 2, 3, 1))\n', (2818, 2846), True, 'import megengine.functional as F\n'), ((2957, 2997), 'megengine.functional.transpose', 'F.transpose', (['right_feature', '(0, 2, 3, 1)'], {}), '(right_feature, 
(0, 2, 3, 1))\n', (2968, 2997), True, 'import megengine.functional as F\n'), ((4344, 4375), 'numpy.arange', 'np.arange', (['(-rx)', '(rx + 1)', 'dilatex'], {}), '(-rx, rx + 1, dilatex)\n', (4353, 4375), True, 'import numpy as np\n'), ((4377, 4408), 'numpy.arange', 'np.arange', (['(-ry)', '(ry + 1)', 'dilatey'], {}), '(-ry, ry + 1, dilatey)\n', (4386, 4408), True, 'import numpy as np\n'), ((4452, 4496), 'megengine.tensor', 'mge.tensor', (['y_grid'], {'device': 'self.fmap1.device'}), '(y_grid, device=self.fmap1.device)\n', (4462, 4496), True, 'import megengine as mge\n'), ((4498, 4542), 'megengine.tensor', 'mge.tensor', (['x_grid'], {'device': 'self.fmap1.device'}), '(x_grid, device=self.fmap1.device)\n', (4508, 4542), True, 'import megengine as mge\n'), ((4961, 4985), 'megengine.functional.expand_dims', 'F.expand_dims', (['coords', '(1)'], {}), '(coords, 1)\n', (4974, 4985), True, 'import megengine.functional as F\n'), ((3242, 3268), 'megengine.functional.reshape', 'F.reshape', (['x', '(N, H, W, C)'], {}), '(x, (N, H, W, C))\n', (3251, 3268), True, 'import megengine.functional as F\n'), ((4634, 4659), 'megengine.functional.stack', 'F.stack', (['(x_grid, y_grid)'], {}), '((x_grid, y_grid))\n', (4641, 4659), True, 'import megengine.functional as F\n')]
|
import megengine as mge
import megengine.module as M
from megengine import functional as F
import numpy as np
from .transformer import MultiheadAttention
#from .utility import has_nan_or_inf
# mge.core.set_option('async_level', 0)
class DecoderWrapper(M.Module):
    """Wraps a multi-head attention decoder, optionally injecting positional
    embeddings into the key (and, if configured, the value) stream."""

    def __init__(self, cfg):
        super().__init__()
        hidden = cfg.distiller.HIDDEN_DIM
        num_heads = cfg.distiller.ATT_HEADS
        # local module derived from the official implementation; the last
        # modules are modified relative to upstream
        self.matt = MultiheadAttention(hidden, num_heads)
        self.pos_projector = M.Linear(in_features=hidden, out_features=hidden)
        self.use_pos = cfg.distiller.USE_POS_EMBEDDING
        self.pos_on_v = cfg.distiller.DECODER_POSEMB_ON_V

    def with_pos_embed(self, tensor, pos):
        """Project *pos* and add it onto *tensor*.

        tensor: [S, N, C]
        pos:    [S, N, C] or [S, 1, C]
        """
        if not self.use_pos:
            return tensor
        pos = self.pos_projector(pos)
        if pos is None:
            return tensor
        return tensor + pos

    def forward(self, q, k, v, query_mask=None, key_padding_mask=None, pos_embedding=None, proj_only=False):
        # q, k, v: [sequence_len, batch_size, channels]
        k = self.with_pos_embed(k, pos_embedding)
        if self.pos_on_v:
            v = self.with_pos_embed(v, pos_embedding)
        # matt returns (attention_output, attention_mask, projected_values)
        return self.matt(
            q, k, v, key_padding_mask=key_padding_mask, proj_only=proj_only
        )
|
[
"megengine.module.Linear"
] |
[((590, 643), 'megengine.module.Linear', 'M.Linear', ([], {'in_features': 'channels', 'out_features': 'channels'}), '(in_features=channels, out_features=channels)\n', (598, 643), True, 'import megengine.module as M\n')]
|
import io
import pickle
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
    """Identity module: returns its input unchanged (placeholder for tracing)."""

    def forward(self, x):
        # plain passthrough
        return x
class PreProcess(M.Module):
    """Crops/rescales NHWC images to their ROI via a per-sample perspective warp."""

    def __init__(self):
        super().__init__()
        # constant scalars, broadcast per batch in forward
        self.I = F.ones((1,))
        self.M = F.zeros((1,))

    def forward(self, data, idx, roi):
        """Warp each image in *data* according to its ROI.

        data: NHWC image tensor.
        idx:  matrix index per output image (passed to warp_perspective as mat_idx).
        roi:  per-sample corners; assumes layout [N, 2, 2] with
              roi[:, 0] = (xmin, ymin) and roi[:, 1] = (xmax, ymax) — TODO confirm
              against the caller.
        Returns an NCHW float32 tensor of shape (N, C, H, W).
        """
        N, H, W, C = data.shape
        xmax = roi[:, 1, 0]
        xmin = roi[:, 0, 0]
        ymax = roi[:, 1, 1]
        ymin = roi[:, 0, 1]
        # isotropic scale: the larger of the two ROI/image ratios
        scale = F.maximum((xmax - xmin) / W, (ymax - ymin) / H)
        I = F.broadcast_to(self.I, (N,))
        M = F.broadcast_to(self.M, (N, 3, 3))
        # build N affine matrices in place: [[s, 0, xmin], [0, s, ymin], [0, 0, 1]]
        M[:, 0, 0] = scale
        M[:, 0, 2] = xmin
        M[:, 1, 1] = scale
        M[:, 1, 2] = ymin
        M[:, 2, 2] = I
        # warp in NHWC, then convert to NCHW float32 for the downstream module
        resized = (
            F.warp_perspective(
                data, M, (H, W), mat_idx=idx, border_mode="CONSTANT", format="NHWC"
            )
            .transpose(0, 3, 1, 2)
            .astype(np.float32)
        )
        return resized
class Net(M.Module):
    """Chains ROI pre-processing with a previously traced backbone module."""

    def __init__(self, traced_module):
        super().__init__()
        self.pre_process = PreProcess()
        self.traced_module = traced_module

    def forward(self, data, idx, roi):
        # crop/rescale first, then run the traced backbone
        processed = self.pre_process(data, idx, roi)
        return self.traced_module(processed)
def test_preprocess():
    """Round-trip a traced module through pickle, re-trace with pre-processing,
    dump to a serialized graph, and check all stages produce the same output."""
    module = Main()
    data = F.ones((1, 14, 8, 8), dtype=np.uint8)
    # trace the bare identity module, then pickle round-trip it
    traced_module = trace_module(module, data)
    obj = pickle.dumps(traced_module)
    traced_module = pickle.loads(obj)
    # wrap the unpickled traced module with the ROI pre-processing front end
    module = Net(traced_module)
    module.eval()
    idx = F.zeros((1,), dtype=np.int32)
    roi = F.ones((1, 2, 2), dtype=np.float32)
    y = module(data, idx, roi)
    # re-tracing the wrapped module must reproduce the eager result
    traced_module = trace_module(module, data, idx, roi)
    np.testing.assert_array_equal(traced_module(data, idx, roi), y)
    func = trace(traced_module, capture_as_const=True)
    np.testing.assert_array_equal(func(data, idx, roi), y)
    # dump the traced function to an in-memory graph and run it via GraphInference
    model = io.BytesIO()
    func.dump(model, arg_names=("data", "idx", "roi"))
    model.seek(0)
    infer_cg = cgtools.GraphInference(model)
    np.testing.assert_allclose(
        list(
            infer_cg.run(
                inp_dict={"data": data.numpy(), "idx": idx.numpy(), "roi": roi.numpy()}
            ).values()
        )[0],
        y,
        atol=1e-6,
    )
|
[
"megengine.jit.trace",
"megengine.functional.maximum",
"megengine.functional.zeros",
"megengine.functional.broadcast_to",
"megengine.functional.ones",
"megengine.functional.warp_perspective",
"megengine.core._trace_option.set_symbolic_shape",
"megengine.traced_module.trace_module",
"megengine.utils.comp_graph_tools.GraphInference"
] |
[((300, 324), 'megengine.core._trace_option.set_symbolic_shape', 'set_symbolic_shape', (['(True)'], {}), '(True)\n', (318, 324), False, 'from megengine.core._trace_option import set_symbolic_shape\n'), ((1612, 1649), 'megengine.functional.ones', 'F.ones', (['(1, 14, 8, 8)'], {'dtype': 'np.uint8'}), '((1, 14, 8, 8), dtype=np.uint8)\n', (1618, 1649), True, 'import megengine.functional as F\n'), ((1670, 1696), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'data'], {}), '(module, data)\n', (1682, 1696), False, 'from megengine.traced_module import trace_module\n'), ((1707, 1734), 'pickle.dumps', 'pickle.dumps', (['traced_module'], {}), '(traced_module)\n', (1719, 1734), False, 'import pickle\n'), ((1755, 1772), 'pickle.loads', 'pickle.loads', (['obj'], {}), '(obj)\n', (1767, 1772), False, 'import pickle\n'), ((1833, 1862), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (1840, 1862), True, 'import megengine.functional as F\n'), ((1873, 1908), 'megengine.functional.ones', 'F.ones', (['(1, 2, 2)'], {'dtype': 'np.float32'}), '((1, 2, 2), dtype=np.float32)\n', (1879, 1908), True, 'import megengine.functional as F\n'), ((1960, 1996), 'megengine.traced_module.trace_module', 'trace_module', (['module', 'data', 'idx', 'roi'], {}), '(module, data, idx, roi)\n', (1972, 1996), False, 'from megengine.traced_module import trace_module\n'), ((2076, 2119), 'megengine.jit.trace', 'trace', (['traced_module'], {'capture_as_const': '(True)'}), '(traced_module, capture_as_const=True)\n', (2081, 2119), False, 'from megengine.jit import trace\n'), ((2191, 2203), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2201, 2203), False, 'import io\n'), ((2292, 2321), 'megengine.utils.comp_graph_tools.GraphInference', 'cgtools.GraphInference', (['model'], {}), '(model)\n', (2314, 2321), True, 'import megengine.utils.comp_graph_tools as cgtools\n'), ((490, 502), 'megengine.functional.ones', 'F.ones', (['(1,)'], {}), 
'((1,))\n', (496, 502), True, 'import megengine.functional as F\n'), ((520, 533), 'megengine.functional.zeros', 'F.zeros', (['(1,)'], {}), '((1,))\n', (527, 533), True, 'import megengine.functional as F\n'), ((734, 781), 'megengine.functional.maximum', 'F.maximum', (['((xmax - xmin) / W)', '((ymax - ymin) / H)'], {}), '((xmax - xmin) / W, (ymax - ymin) / H)\n', (743, 781), True, 'import megengine.functional as F\n'), ((794, 822), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.I', '(N,)'], {}), '(self.I, (N,))\n', (808, 822), True, 'import megengine.functional as F\n'), ((835, 868), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['self.M', '(N, 3, 3)'], {}), '(self.M, (N, 3, 3))\n', (849, 868), True, 'import megengine.functional as F\n'), ((1030, 1121), 'megengine.functional.warp_perspective', 'F.warp_perspective', (['data', 'M', '(H, W)'], {'mat_idx': 'idx', 'border_mode': '"""CONSTANT"""', 'format': '"""NHWC"""'}), "(data, M, (H, W), mat_idx=idx, border_mode='CONSTANT',\n format='NHWC')\n", (1048, 1121), True, 'import megengine.functional as F\n')]
|
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import List
import megengine.distributed as dist
from basecore.config import ConfigDict
from basecore.engine import BaseHook
from basecore.utils import str_timestamp
from basecls.utils import registers
from .hooks import (
CheckpointHook,
EvalHook,
LoggerHook,
LRSchedulerHook,
PreciseBNHook,
ResumeHook,
TensorboardHook,
)
__all__ = ["DefaultHooks"]
@registers.hooks.register()
class DefaultHooks:
    """The default hooks factory.

    Produces, in order: :py:class:`~basecls.engine.LRSchedulerHook` ->
    :py:class:`~basecls.engine.PreciseBNHook` -> :py:class:`~basecls.engine.ResumeHook` ->
    :py:class:`~basecls.engine.TensorboardHook` (rank 0 only) ->
    :py:class:`~basecls.engine.LoggerHook` -> :py:class:`~basecls.engine.CheckpointHook` ->
    :py:class:`~basecls.engine.EvalHook`.
    """

    @classmethod
    def build(cls, cfg: ConfigDict) -> List[BaseHook]:
        """Build function with a simple strategy.

        Args:
            cfg: config for setting hooks.

        Returns:
            A hook list.
        """
        output_dir = cfg.output_dir
        hooks: List[BaseHook] = [
            LRSchedulerHook(),
            PreciseBNHook(cfg.bn.precise_every_n_epoch),
            ResumeHook(output_dir, cfg.resume),
        ]
        if dist.get_rank() == 0:
            # LoggerHook resets values, so TensorboardHook must come before it
            tb_dir = os.path.join(output_dir, "tensorboard", str_timestamp())
            hooks.append(TensorboardHook(tb_dir, cfg.tb_every_n_iter))
        hooks.append(LoggerHook(cfg.log_every_n_iter))
        hooks.append(CheckpointHook(output_dir, cfg.save_every_n_epoch))
        # hooks placed after CheckpointHook work better
        hooks.append(EvalHook(output_dir, cfg.eval_every_n_epoch))
        return hooks
|
[
"megengine.distributed.get_rank"
] |
[((490, 516), 'basecls.utils.registers.hooks.register', 'registers.hooks.register', ([], {}), '()\n', (514, 516), False, 'from basecls.utils import registers\n'), ((1367, 1382), 'megengine.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1380, 1382), True, 'import megengine.distributed as dist\n'), ((1611, 1626), 'basecore.utils.str_timestamp', 'str_timestamp', ([], {}), '()\n', (1624, 1626), False, 'from basecore.utils import str_timestamp\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import random
import megengine as mge
import megengine.functional as F
import megengine.module as M
import numpy as np
from megengine.jit import trace
def dump_mge_model(net, data, fpath="test_model", optimize_for_inference=False):
    """Trace *net* on *data*, dump it to ``fpath + ".mge"`` and return the
    numpy result of running the network.

    Handles both the legacy (<= 0.6.0) and modern megengine tracing APIs.
    """
    if mge.__version__ <= "0.6.0":
        # legacy API: trace() takes keyword-only module args and has an
        # explicit .trace() warm-up step
        @trace(symbolic=True)
        def inference(data, *, net):
            net.eval()
            output = net(data)
            return output
        inference.trace(data, net=net)
        mge_result = inference(data, net=net).numpy()
        inference.dump(
            fpath + ".mge",
            arg_names=["data"],
            optimize_for_inference=optimize_for_inference,
        )
        return mge_result
    else:
        # NOTE(review): this first forward runs before net.eval() and its
        # result is discarded; in training mode it can update BN running
        # statistics, which affects the dumped model — confirm intentional.
        mge_result = net(mge.tensor(data))
        net.eval()
        mge_result = net(mge.tensor(data))
        @trace(symbolic=True, capture_as_const=True)
        def inference(data):
            net.eval()
            output = net(data)
            return output
        # one traced call to record the graph, then dump it
        inference(mge.tensor(data))
        inference.dump(
            fpath + ".mge",
            arg_names=["data"],
            optimize_for_inference=optimize_for_inference,
        )
        return mge_result.numpy()
class ConvOpr(M.Module):
    """Convolution test fixture; *mode* selects which conv variant forward runs:
    "normal", "group", "transpose" or "tflite_transpose"."""

    def __init__(self, mode):
        super().__init__()
        self.mode = mode
        # random NCHW input shared by all variants
        self.data = np.random.random((1, 3, 224, 224)).astype(np.float32)
        self.normal_conv = M.Conv2d(
            3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1)
        )
        self.group_conv = M.Conv2d(
            3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3
        )
        # overwrite the default biases with random float32 parameters
        self.normal_conv.bias = mge.Parameter(
            np.random.random(self.normal_conv.bias.shape).astype(np.float32)
        )
        self.group_conv.bias = mge.Parameter(
            np.random.random(self.group_conv.bias.shape).astype(np.float32)
        )
        self.transpose_conv = M.Sequential(
            M.ConvTranspose2d(
                3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2, 3), groups=1
            ),
            M.ConvTranspose2d(5, 3, (3, 3)),
        )
        self.transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
        )
        self.transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
        )
        # tflite variant: no dilation/padding (tflite transpose-conv restrictions)
        self.tflite_transpose_conv = M.Sequential(
            M.ConvTranspose2d(3, 5, (3, 4), stride=(3, 2), groups=1),
            M.ConvTranspose2d(5, 3, (3, 3)),
        )
        # NOTE(review): biases below are sized from transpose_conv's shapes;
        # channel counts match, so shapes agree — presumably a deliberate shortcut.
        self.tflite_transpose_conv[0].bias = mge.Parameter(
            np.random.random(self.transpose_conv[0].bias.shape).astype(np.float32)
        )
        self.tflite_transpose_conv[1].bias = mge.Parameter(
            np.random.random(self.transpose_conv[1].bias.shape).astype(np.float32)
        )

    def forward(self, x):
        # dispatch on mode, e.g. "normal" -> self.normal_conv
        return getattr(self, self.mode + "_conv")(x)
class LinearOpr(M.Module):
    """Two stacked fully-connected layers (bias-free then biased) plus ReLU."""

    def __init__(self):
        super().__init__()
        self.data = np.random.random((10, 100)).astype(np.float32)
        self.linear = M.Linear(100, 200, bias=False)
        self.linear_bias = M.Linear(200, 200, bias=True)
        # replace the default bias with a random float32 parameter
        self.linear_bias.bias = mge.Parameter(
            np.random.random(self.linear_bias.bias.shape).astype(np.float32)
        )

    def forward(self, x):
        hidden = self.linear_bias(self.linear(x))
        return F.relu(hidden)
class PoolOpr(M.Module):
    """Pooling fixture; *mode* ("max" or "avg") selects which pool forward applies."""

    def __init__(self, mode):
        super().__init__()
        self.mode = mode
        self.data = np.random.random((30, 3, 224, 224)).astype(np.float32)
        self.maxpool = M.pooling.MaxPool2d(kernel_size=3, stride=2, padding=2)
        self.avgpool = M.pooling.AvgPool2d(kernel_size=3, stride=2, padding=2)

    def forward(self, x):
        # dispatch on mode, e.g. "max" -> self.maxpool
        pool = getattr(self, self.mode + "pool")
        return pool(x)
class BnOpr(M.Module):
    """BatchNorm fixture; *mode* ("bn1d" or "bn2d") selects the normalization layer."""

    def __init__(self, mode):
        super().__init__()
        self.mode = mode
        self.data1 = np.random.random((1, 32, 32)).astype(np.float32)
        self.data2 = np.random.random((20, 3, 24, 24)).astype(np.float32)
        self.bn1d = M.BatchNorm1d(32)
        self.bn2d = M.BatchNorm2d(3)

    def forward(self, x):
        # mode is the attribute name itself
        layer = getattr(self, self.mode)
        return layer(x)
class SubtensorOpr(M.Module):
    """Exercises chained slicing/indexing; *fix_batch* keeps the batch axis intact."""

    def __init__(self, fix_batch=False):
        super().__init__()
        self.fix_batch = fix_batch
        self.data = np.random.random((10, 10, 10, 10)).astype(np.float32)

    def forward(self, x):
        if self.fix_batch:
            # slice only the non-batch axes
            x = x[:, 4:8, :, 4:9]
            return x[:, :, 2:7, 3]
        # slice the batch axis as well, then integer-index twice
        x = x[1:3, 4:8, :, 4:9]
        x = x[:, :, :, 3]
        return x[1, 1:]
class TransposeOpr(M.Module):
    """Applies a fixed NCHW -> NHWC axis permutation."""

    def __init__(self):
        super().__init__()
        self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
        self.perm = [0, 2, 3, 1]

    def forward(self, x):
        permutation = self.perm
        return F.transpose(x, permutation)
class ConcatOpr(M.Module):
    """Concatenates the input with itself along an axis drawn at construction."""

    def __init__(self):
        super().__init__()
        # axis chosen once, uniformly in [0, 3]
        self.concat_idx = random.randint(0, 3)
        self.data = np.random.random((1, 2, 4, 5)).astype(np.float32)

    def forward(self, a):
        pair = [a, a]
        return F.concat(pair, self.concat_idx)
class SoftmaxOpr(M.Module):
    """Applies softmax over the default axis to a (1, 1000) input."""

    def __init__(self):
        super().__init__()
        self.data = np.random.random((1, 1000)).astype(np.float32)

    def forward(self, a):
        result = F.softmax(a)
        return result
class SqueezeOpr(M.Module):
    """Removes the leading size-1 axis, using the API name of the installed megengine."""

    def __init__(self):
        super().__init__()
        self.data = np.random.random((1, 1, 1000)).astype(np.float32)

    def forward(self, a):
        if mge.__version__ <= "0.6.0":
            # old releases expose the op as remove_axis
            return F.remove_axis(a, 0)  # pylint: disable=no-member
        return F.squeeze(a, 0)
class ReshapeOpr(M.Module):
    """Applies three successive reshapes; *fix_batch* keeps batch dim 1 throughout."""

    def __init__(self, fix_batch=False):
        super().__init__()
        if fix_batch:
            self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
            self.out_shape = (1, 2 * 3, 4)
            self.out_shape1 = (1, 2 * 3 * 4)
            self.out_shape2 = (1, 2, 3 * 4)
        else:
            self.data = np.random.random((1, 2, 3, 4, 5)).astype(np.float32)
            self.out_shape = [1, 2, 3 * 4, 5]
            self.out_shape1 = [1 * 2, 3 * 4 * 5]
            self.out_shape2 = [1 * 2 * 3, 4 * 5]

    def forward(self, x):
        # apply each target shape in turn
        for shape in (self.out_shape, self.out_shape1, self.out_shape2):
            x = F.reshape(x, shape)
        return x
class ElemwiseOpr(M.Module):
    """Element-wise op fixture; *mode* selects one of many arithmetic /
    activation combinations (add, sub, mul, div, max, min, pow, ceil, floor,
    cycle_div, abs, exp, log, fuse_add_relu, fuse_mul_add3, fuse_add_sigmoid)."""

    def __init__(self, mode):
        super().__init__()
        # data: all-ones; data1: broadcastable (1,3,1,1); data2: shifted randoms
        self.data = np.ones((2, 3, 224, 224)).astype(np.float32)
        self.data1 = np.random.random((1, 3, 1, 1)).astype(np.float32)
        self.data2 = np.random.random((2, 3, 224, 224)).astype(np.float32) - 0.8
        self.mode = mode

    def forward(self, a):
        # add: scalar add plus broadcast add, then combine
        if self.mode == "add":
            x = a + mge.tensor(np.float32(10))
            y = a + mge.tensor(self.data1)
            z = x + y
        # sub
        elif self.mode == "sub":
            x = a - mge.tensor(np.float32(10))
            y = a - mge.tensor(self.data1)
            z = x - y
        # mul
        elif self.mode == "mul":
            x = a * mge.tensor(np.float32(10))
            y = mge.tensor(self.data1) * a
            z = x * y
        # max / min
        elif self.mode == "max":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.maximum(x, y)
        elif self.mode == "min":
            x = a + mge.tensor(self.data)
            y = a + mge.tensor(self.data2)
            z = F.minimum(x, y)
        elif self.mode == "pow":
            z = a ** 2
        elif self.mode == "ceil":
            z = F.ceil(a)
        elif self.mode == "floor":
            z = F.floor(a)
        # div: both tensor/tensor and tensor/scalar directions
        elif self.mode == "div":
            y = mge.tensor(self.data1) / a
            x = a / mge.tensor(np.float32(2))
            z = y / x
        # cycle_div
        elif self.mode == "cycle_div":
            z = a / mge.tensor(self.data1)
        # abs
        elif self.mode == "abs":
            z = F.abs(a)
        # exp
        elif self.mode == "exp":
            z = F.exp(a)
        # log
        elif self.mode == "log":
            z = F.log(a)
        # fused patterns: add followed by an activation / multiply-add
        elif self.mode == "fuse_add_relu":
            y = a + mge.tensor(self.data2)
            z = F.relu(y)
        elif self.mode == "fuse_mul_add3":
            y = a * mge.tensor(self.data1)
            z = y + mge.tensor(self.data2)
        elif self.mode == "fuse_add_sigmoid":
            y = a + mge.tensor(self.data2)
            z = F.sigmoid(y)
        else:
            raise NotImplementedError('no such elemwise mode "%s"' % self.mode)
        return z
class ReduceOpr(M.Module):
    """Reduces axis 2 by sum, mean, or max according to *mode*."""

    def __init__(self, mode):
        super().__init__()
        self.mode = mode
        self.data = np.random.random((1, 3, 1000)).astype(np.float32)

    def forward(self, a):
        # guard-clause dispatch; anything other than sum/mean falls back to max
        if self.mode == "sum":
            return F.sum(a, axis=2)
        if self.mode == "mean":
            return F.mean(a, axis=2)
        return F.max(a, axis=2)
class ResizeOpr(M.Module):
    """Bilinear up-scale to (8, 8) then back down to (3, 4)."""

    def __init__(self):
        super().__init__()
        self.data = np.random.random((1, 2, 3, 4)).astype(np.float32)
        self.out_shape = [8, 8]
        self.out_shape2 = [3, 4]

    def forward(self, x):
        upscaled = F.vision.interpolate(x, size=self.out_shape, mode="bilinear")
        return F.vision.interpolate(upscaled, size=self.out_shape2, mode="bilinear")
class ActiveOpr(M.Module):
    """Applies the activation named by *mode*; *fused* first doubles the input
    (x + x) so the converter sees a fused add+activation pattern."""

    str2fun = {
        "relu": F.relu,
        "tanh": F.tanh,
        "sigmoid": F.sigmoid,
        "leaky_relu": F.leaky_relu,
        "softmax": F.softmax,
        "relu6": lambda x: F.maximum(F.minimum(x, 6), 0),
    }

    def __init__(self, mode, fused=False):
        super().__init__()
        self.mode = mode
        self.fused = fused
        # random inputs spread over roughly [-4, 4)
        self.data = (np.random.random((1, 2, 3, 4)).astype(np.float32) - 0.5) * 8.0

    def forward(self, x):
        activation = ActiveOpr.str2fun[self.mode]
        return activation(x + x) if self.fused else activation(x)
class BroadcastOpr(M.Module):
    """Broadcasts a scalar-like input to a fixed (3, 5) shape."""

    def __init__(self):
        super().__init__()
        self.data = np.array([1], dtype=np.float16)

    def forward(self, x):
        target_shape = (3, 5)
        return F.broadcast_to(x, target_shape)
class TypeCvtOpr(M.Module):
    """Adds 1 to an int32 input and converts the result to float32."""

    def __init__(self):
        super().__init__()
        self.data = np.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)

    def forward(self, x):
        incremented = x + 1
        # int32 -> float32 type conversion
        return incremented.astype(np.float32)
class XORNet(M.Module):
    """Small XOR classifier: three Linear layers with BatchNorm, using softmax
    activations when targeting the "tflite" converter and tanh otherwise."""

    def __init__(self, converter="normal"):
        # plain attributes are set before Module.__init__, as in the original
        self.converter = converter
        self.mid_dim = 14
        self.num_class = 2
        super().__init__()
        self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
        self.bn0 = M.BatchNorm1d(self.mid_dim)
        self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
        self.bn1 = M.BatchNorm1d(self.mid_dim)
        self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
        self.data = np.arange(24).reshape(12, 2).astype(np.float32)

    def _activate(self, x):
        # tflite conversion exercises softmax; every other target uses tanh
        return F.softmax(x) if self.converter == "tflite" else F.tanh(x)

    def forward(self, x):
        x = self._activate(self.bn0(self.fc0(x)))
        x = self._activate(self.bn1(self.fc1(x)))
        return self.fc2(x)
class XORNet_LeakyRelu(M.Module):
    """XOR classifier variant that interleaves leaky_relu/tanh activations,
    primarily to exercise leaky_relu conversion."""

    def __init__(self):
        # plain attributes are set before Module.__init__, as in the original
        self.mid_dim = 14
        self.num_class = 2
        super().__init__()
        self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
        self.bn0 = M.BatchNorm1d(self.mid_dim)
        self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
        self.bn1 = M.BatchNorm1d(self.mid_dim)
        self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)
        self.data = np.random.random((12, 2)).astype(np.float32)

    def forward(self, x):
        # same op sequence as the original:
        # fc0 -> bn0 -> lrelu -> lrelu -> tanh -> fc1 -> lrelu -> bn1 -> tanh -> fc2 -> lrelu
        x = self.bn0(self.fc0(x))
        x = F.tanh(F.leaky_relu(F.leaky_relu(x)))
        x = self.bn1(F.leaky_relu(self.fc1(x)))
        x = F.tanh(x)
        return F.leaky_relu(self.fc2(x))
|
[
"megengine.functional.transpose",
"megengine.functional.sum",
"megengine.module.Conv2d",
"megengine.module.pooling.MaxPool2d",
"megengine.functional.remove_axis",
"megengine.functional.squeeze",
"megengine.functional.floor",
"megengine.module.BatchNorm2d",
"megengine.functional.log",
"megengine.functional.leaky_relu",
"megengine.functional.broadcast_to",
"megengine.jit.trace",
"megengine.tensor",
"megengine.functional.concat",
"megengine.module.ConvTranspose2d",
"megengine.module.BatchNorm1d",
"megengine.functional.exp",
"megengine.functional.ceil",
"megengine.functional.vision.interpolate",
"megengine.functional.minimum",
"megengine.module.Linear",
"megengine.functional.sigmoid",
"megengine.module.pooling.AvgPool2d",
"megengine.functional.maximum",
"megengine.functional.softmax",
"megengine.functional.relu",
"megengine.functional.mean",
"megengine.functional.abs",
"megengine.functional.max",
"megengine.functional.reshape",
"megengine.functional.tanh"
] |
[((657, 677), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)'}), '(symbolic=True)\n', (662, 677), False, 'from megengine.jit import trace\n'), ((1193, 1236), 'megengine.jit.trace', 'trace', ([], {'symbolic': '(True)', 'capture_as_const': '(True)'}), '(symbolic=True, capture_as_const=True)\n', (1198, 1236), False, 'from megengine.jit import trace\n'), ((1780, 1846), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'dilation': '(2, 2)', 'padding': '(3, 1)'}), '(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1))\n', (1788, 1846), True, 'import megengine.module as M\n'), ((1895, 1971), 'megengine.module.Conv2d', 'M.Conv2d', (['(3)', '(30)', '(3)'], {'stride': '(2, 3)', 'dilation': '(2, 2)', 'padding': '(3, 1)', 'groups': '(3)'}), '(3, 30, 3, stride=(2, 3), dilation=(2, 2), padding=(3, 1), groups=3)\n', (1903, 1971), True, 'import megengine.module as M\n'), ((3518, 3548), 'megengine.module.Linear', 'M.Linear', (['(100)', '(200)'], {'bias': '(False)'}), '(100, 200, bias=False)\n', (3526, 3548), True, 'import megengine.module as M\n'), ((3576, 3605), 'megengine.module.Linear', 'M.Linear', (['(200)', '(200)'], {'bias': '(True)'}), '(200, 200, bias=True)\n', (3584, 3605), True, 'import megengine.module as M\n'), ((3838, 3847), 'megengine.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3844, 3847), True, 'import megengine.functional as F\n'), ((4072, 4127), 'megengine.module.pooling.MaxPool2d', 'M.pooling.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(2)'}), '(kernel_size=3, stride=2, padding=2)\n', (4091, 4127), True, 'import megengine.module as M\n'), ((4151, 4206), 'megengine.module.pooling.AvgPool2d', 'M.pooling.AvgPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(2)'}), '(kernel_size=3, stride=2, padding=2)\n', (4170, 4206), True, 'import megengine.module as M\n'), ((4557, 4574), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['(32)'], {}), '(32)\n', (4570, 4574), True, 
'import megengine.module as M\n'), ((4595, 4611), 'megengine.module.BatchNorm2d', 'M.BatchNorm2d', (['(3)'], {}), '(3)\n', (4608, 4611), True, 'import megengine.module as M\n'), ((5361, 5386), 'megengine.functional.transpose', 'F.transpose', (['x', 'self.perm'], {}), '(x, self.perm)\n', (5372, 5386), True, 'import megengine.functional as F\n'), ((5493, 5513), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (5507, 5513), False, 'import random\n'), ((5626, 5659), 'megengine.functional.concat', 'F.concat', (['[a, a]', 'self.concat_idx'], {}), '([a, a], self.concat_idx)\n', (5634, 5659), True, 'import megengine.functional as F\n'), ((5850, 5862), 'megengine.functional.softmax', 'F.softmax', (['a'], {}), '(a)\n', (5859, 5862), True, 'import megengine.functional as F\n'), ((6797, 6825), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape'], {}), '(x, self.out_shape)\n', (6806, 6825), True, 'import megengine.functional as F\n'), ((6838, 6867), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape1'], {}), '(x, self.out_shape1)\n', (6847, 6867), True, 'import megengine.functional as F\n'), ((6880, 6909), 'megengine.functional.reshape', 'F.reshape', (['x', 'self.out_shape2'], {}), '(x, self.out_shape2)\n', (6889, 6909), True, 'import megengine.functional as F\n'), ((9837, 9898), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x'], {'size': 'self.out_shape', 'mode': '"""bilinear"""'}), "(x, size=self.out_shape, mode='bilinear')\n", (9857, 9898), True, 'import megengine.functional as F\n'), ((9911, 9973), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['x'], {'size': 'self.out_shape2', 'mode': '"""bilinear"""'}), "(x, size=self.out_shape2, mode='bilinear')\n", (9931, 9973), True, 'import megengine.functional as F\n'), ((10724, 10755), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'np.float16'}), '([1], dtype=np.float16)\n', (10732, 10755), True, 'import numpy as np\n'), ((10798, 
10823), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', '(3, 5)'], {}), '(x, (3, 5))\n', (10812, 10823), True, 'import megengine.functional as F\n'), ((10925, 10979), 'numpy.array', 'np.array', (['[[2, 2, 2, 2], [3, 3, 3, 3]]'], {'dtype': 'np.int32'}), '([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=np.int32)\n', (10933, 10979), True, 'import numpy as np\n'), ((11279, 11328), 'megengine.module.Linear', 'M.Linear', (['self.num_class', 'self.mid_dim'], {'bias': '(True)'}), '(self.num_class, self.mid_dim, bias=True)\n', (11287, 11328), True, 'import megengine.module as M\n'), ((11348, 11375), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (11361, 11375), True, 'import megengine.module as M\n'), ((11395, 11442), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.mid_dim'], {'bias': '(True)'}), '(self.mid_dim, self.mid_dim, bias=True)\n', (11403, 11442), True, 'import megengine.module as M\n'), ((11462, 11489), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (11475, 11489), True, 'import megengine.module as M\n'), ((11509, 11558), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.num_class'], {'bias': '(True)'}), '(self.mid_dim, self.num_class, bias=True)\n', (11517, 11558), True, 'import megengine.module as M\n'), ((12090, 12139), 'megengine.module.Linear', 'M.Linear', (['self.num_class', 'self.mid_dim'], {'bias': '(True)'}), '(self.num_class, self.mid_dim, bias=True)\n', (12098, 12139), True, 'import megengine.module as M\n'), ((12159, 12186), 'megengine.module.BatchNorm1d', 'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (12172, 12186), True, 'import megengine.module as M\n'), ((12206, 12253), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.mid_dim'], {'bias': '(True)'}), '(self.mid_dim, self.mid_dim, bias=True)\n', (12214, 12253), True, 'import megengine.module as M\n'), ((12273, 12300), 'megengine.module.BatchNorm1d', 
'M.BatchNorm1d', (['self.mid_dim'], {}), '(self.mid_dim)\n', (12286, 12300), True, 'import megengine.module as M\n'), ((12320, 12369), 'megengine.module.Linear', 'M.Linear', (['self.mid_dim', 'self.num_class'], {'bias': '(True)'}), '(self.mid_dim, self.num_class, bias=True)\n', (12328, 12369), True, 'import megengine.module as M\n'), ((12522, 12537), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12534, 12537), True, 'import megengine.functional as F\n'), ((12550, 12565), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12562, 12565), True, 'import megengine.functional as F\n'), ((12578, 12587), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (12584, 12587), True, 'import megengine.functional as F\n'), ((12624, 12639), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12636, 12639), True, 'import megengine.functional as F\n'), ((12676, 12685), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (12682, 12685), True, 'import megengine.functional as F\n'), ((12722, 12737), 'megengine.functional.leaky_relu', 'F.leaky_relu', (['x'], {}), '(x)\n', (12734, 12737), True, 'import megengine.functional as F\n'), ((1103, 1119), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1113, 1119), True, 'import megengine as mge\n'), ((1165, 1181), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1175, 1181), True, 'import megengine as mge\n'), ((1365, 1381), 'megengine.tensor', 'mge.tensor', (['data'], {}), '(data)\n', (1375, 1381), True, 'import megengine as mge\n'), ((2318, 2411), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(3)', '(5)', '(3, 4)'], {'dilation': '(2, 2)', 'stride': '(3, 2)', 'padding': '(2, 3)', 'groups': '(1)'}), '(3, 5, (3, 4), dilation=(2, 2), stride=(3, 2), padding=(2,\n 3), groups=1)\n', (2335, 2411), True, 'import megengine.module as M\n'), ((2451, 2482), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(5)', 
'(3)', '(3, 3)'], {}), '(5, 3, (3, 3))\n', (2468, 2482), True, 'import megengine.module as M\n'), ((2850, 2906), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(3)', '(5)', '(3, 4)'], {'stride': '(3, 2)', 'groups': '(1)'}), '(3, 5, (3, 4), stride=(3, 2), groups=1)\n', (2867, 2906), True, 'import megengine.module as M\n'), ((2920, 2951), 'megengine.module.ConvTranspose2d', 'M.ConvTranspose2d', (['(5)', '(3)', '(3, 3)'], {}), '(5, 3, (3, 3))\n', (2937, 2951), True, 'import megengine.module as M\n'), ((6099, 6118), 'megengine.functional.remove_axis', 'F.remove_axis', (['a', '(0)'], {}), '(a, 0)\n', (6112, 6118), True, 'import megengine.functional as F\n'), ((6181, 6196), 'megengine.functional.squeeze', 'F.squeeze', (['a', '(0)'], {}), '(a, 0)\n', (6190, 6196), True, 'import megengine.functional as F\n'), ((9445, 9461), 'megengine.functional.sum', 'F.sum', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9450, 9461), True, 'import megengine.functional as F\n'), ((11714, 11726), 'megengine.functional.softmax', 'F.softmax', (['x'], {}), '(x)\n', (11723, 11726), True, 'import megengine.functional as F\n'), ((11762, 11771), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (11768, 11771), True, 'import megengine.functional as F\n'), ((11832, 11844), 'megengine.functional.softmax', 'F.softmax', (['x'], {}), '(x)\n', (11841, 11844), True, 'import megengine.functional as F\n'), ((11880, 11889), 'megengine.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (11886, 11889), True, 'import megengine.functional as F\n'), ((1699, 1733), 'numpy.random.random', 'np.random.random', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (1715, 1733), True, 'import numpy as np\n'), ((3449, 3476), 'numpy.random.random', 'np.random.random', (['(10, 100)'], {}), '((10, 100))\n', (3465, 3476), True, 'import numpy as np\n'), ((3994, 4029), 'numpy.random.random', 'np.random.random', (['(30, 3, 224, 224)'], {}), '((30, 3, 224, 224))\n', (4010, 4029), True, 'import numpy as 
np\n'), ((4414, 4443), 'numpy.random.random', 'np.random.random', (['(1, 32, 32)'], {}), '((1, 32, 32))\n', (4430, 4443), True, 'import numpy as np\n'), ((4484, 4517), 'numpy.random.random', 'np.random.random', (['(20, 3, 24, 24)'], {}), '((20, 3, 24, 24))\n', (4500, 4517), True, 'import numpy as np\n'), ((4837, 4871), 'numpy.random.random', 'np.random.random', (['(10, 10, 10, 10)'], {}), '((10, 10, 10, 10))\n', (4853, 4871), True, 'import numpy as np\n'), ((5236, 5266), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (5252, 5266), True, 'import numpy as np\n'), ((5534, 5564), 'numpy.random.random', 'np.random.random', (['(1, 2, 4, 5)'], {}), '((1, 2, 4, 5))\n', (5550, 5564), True, 'import numpy as np\n'), ((5761, 5788), 'numpy.random.random', 'np.random.random', (['(1, 1000)'], {}), '((1, 1000))\n', (5777, 5788), True, 'import numpy as np\n'), ((5964, 5994), 'numpy.random.random', 'np.random.random', (['(1, 1, 1000)'], {}), '((1, 1, 1000))\n', (5980, 5994), True, 'import numpy as np\n'), ((7035, 7060), 'numpy.ones', 'np.ones', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (7042, 7060), True, 'import numpy as np\n'), ((7101, 7131), 'numpy.random.random', 'np.random.random', (['(1, 3, 1, 1)'], {}), '((1, 3, 1, 1))\n', (7117, 7131), True, 'import numpy as np\n'), ((7396, 7418), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7406, 7418), True, 'import megengine as mge\n'), ((9318, 9348), 'numpy.random.random', 'np.random.random', (['(1, 3, 1000)'], {}), '((1, 3, 1000))\n', (9334, 9348), True, 'import numpy as np\n'), ((9515, 9532), 'megengine.functional.mean', 'F.mean', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9521, 9532), True, 'import megengine.functional as F\n'), ((9566, 9582), 'megengine.functional.max', 'F.max', (['a'], {'axis': '(2)'}), '(a, axis=2)\n', (9571, 9582), True, 'import megengine.functional as F\n'), ((9683, 9713), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], 
{}), '((1, 2, 3, 4))\n', (9699, 9713), True, 'import numpy as np\n'), ((10217, 10232), 'megengine.functional.minimum', 'F.minimum', (['x', '(6)'], {}), '(x, 6)\n', (10226, 10232), True, 'import megengine.functional as F\n'), ((12390, 12415), 'numpy.random.random', 'np.random.random', (['(12, 2)'], {}), '((12, 2))\n', (12406, 12415), True, 'import numpy as np\n'), ((2054, 2099), 'numpy.random.random', 'np.random.random', (['self.normal_conv.bias.shape'], {}), '(self.normal_conv.bias.shape)\n', (2070, 2099), True, 'import numpy as np\n'), ((2187, 2231), 'numpy.random.random', 'np.random.random', (['self.group_conv.bias.shape'], {}), '(self.group_conv.bias.shape)\n', (2203, 2231), True, 'import numpy as np\n'), ((2559, 2610), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[0].bias.shape'], {}), '(self.transpose_conv[0].bias.shape)\n', (2575, 2610), True, 'import numpy as np\n'), ((2705, 2756), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[1].bias.shape'], {}), '(self.transpose_conv[1].bias.shape)\n', (2721, 2756), True, 'import numpy as np\n'), ((3035, 3086), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[0].bias.shape'], {}), '(self.transpose_conv[0].bias.shape)\n', (3051, 3086), True, 'import numpy as np\n'), ((3188, 3239), 'numpy.random.random', 'np.random.random', (['self.transpose_conv[1].bias.shape'], {}), '(self.transpose_conv[1].bias.shape)\n', (3204, 3239), True, 'import numpy as np\n'), ((3665, 3710), 'numpy.random.random', 'np.random.random', (['self.linear_bias.bias.shape'], {}), '(self.linear_bias.bias.shape)\n', (3681, 3710), True, 'import numpy as np\n'), ((6341, 6371), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (6357, 6371), True, 'import numpy as np\n'), ((6561, 6594), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4, 5)'], {}), '((1, 2, 3, 4, 5))\n', (6577, 6594), True, 'import numpy as np\n'), ((7172, 7206), 'numpy.random.random', 
'np.random.random', (['(2, 3, 224, 224)'], {}), '((2, 3, 224, 224))\n', (7188, 7206), True, 'import numpy as np\n'), ((7360, 7374), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7370, 7374), True, 'import numpy as np\n'), ((7555, 7577), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7565, 7577), True, 'import megengine as mge\n'), ((7519, 7533), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7529, 7533), True, 'import numpy as np\n'), ((7710, 7732), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (7720, 7732), True, 'import megengine as mge\n'), ((7907, 7922), 'megengine.functional.maximum', 'F.maximum', (['x', 'y'], {}), '(x, y)\n', (7916, 7922), True, 'import megengine.functional as F\n'), ((10388, 10418), 'numpy.random.random', 'np.random.random', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (10404, 10418), True, 'import numpy as np\n'), ((11579, 11592), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (11588, 11592), True, 'import numpy as np\n'), ((7678, 7692), 'numpy.float32', 'np.float32', (['(10)'], {}), '(10)\n', (7688, 7692), True, 'import numpy as np\n'), ((7826, 7847), 'megengine.tensor', 'mge.tensor', (['self.data'], {}), '(self.data)\n', (7836, 7847), True, 'import megengine as mge\n'), ((7868, 7890), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (7878, 7890), True, 'import megengine as mge\n'), ((8057, 8072), 'megengine.functional.minimum', 'F.minimum', (['x', 'y'], {}), '(x, y)\n', (8066, 8072), True, 'import megengine.functional as F\n'), ((7976, 7997), 'megengine.tensor', 'mge.tensor', (['self.data'], {}), '(self.data)\n', (7986, 7997), True, 'import megengine as mge\n'), ((8018, 8040), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8028, 8040), True, 'import megengine as mge\n'), ((8181, 8190), 'megengine.functional.ceil', 'F.ceil', (['a'], {}), '(a)\n', (8187, 8190), True, 'import megengine.functional as 
F\n'), ((8243, 8253), 'megengine.functional.floor', 'F.floor', (['a'], {}), '(a)\n', (8250, 8253), True, 'import megengine.functional as F\n'), ((8304, 8326), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8314, 8326), True, 'import megengine as mge\n'), ((8362, 8375), 'numpy.float32', 'np.float32', (['(2)'], {}), '(2)\n', (8372, 8375), True, 'import numpy as np\n'), ((8478, 8500), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8488, 8500), True, 'import megengine as mge\n'), ((8564, 8572), 'megengine.functional.abs', 'F.abs', (['a'], {}), '(a)\n', (8569, 8572), True, 'import megengine.functional as F\n'), ((8636, 8644), 'megengine.functional.exp', 'F.exp', (['a'], {}), '(a)\n', (8641, 8644), True, 'import megengine.functional as F\n'), ((8708, 8716), 'megengine.functional.log', 'F.log', (['a'], {}), '(a)\n', (8713, 8716), True, 'import megengine.functional as F\n'), ((8819, 8828), 'megengine.functional.relu', 'F.relu', (['y'], {}), '(y)\n', (8825, 8828), True, 'import megengine.functional as F\n'), ((8780, 8802), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8790, 8802), True, 'import megengine as mge\n'), ((8892, 8914), 'megengine.tensor', 'mge.tensor', (['self.data1'], {}), '(self.data1)\n', (8902, 8914), True, 'import megengine as mge\n'), ((8935, 8957), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (8945, 8957), True, 'import megengine as mge\n'), ((9063, 9075), 'megengine.functional.sigmoid', 'F.sigmoid', (['y'], {}), '(y)\n', (9072, 9075), True, 'import megengine.functional as F\n'), ((9024, 9046), 'megengine.tensor', 'mge.tensor', (['self.data2'], {}), '(self.data2)\n', (9034, 9046), True, 'import megengine as mge\n')]
|
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import layers
class RPN(M.Module):
    """Region Proposal Network head.

    Predicts per-anchor objectness logits and box-regression deltas on each
    FPN level, decodes them into region proposals, and, in training mode,
    also returns the RPN classification / box-regression losses.
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        # Encodes/decodes boxes <-> regression deltas using the configured
        # normalization mean/std.
        self.box_coder = layers.BoxCoder(cfg.rpn_reg_mean, cfg.rpn_reg_std)

        # check anchor settings: every level must use the same number of
        # scales and ratios so num_cell_anchors is level-independent
        assert len(set(len(x) for x in cfg.anchor_scales)) == 1
        assert len(set(len(x) for x in cfg.anchor_ratios)) == 1
        self.num_cell_anchors = len(cfg.anchor_scales[0]) * len(cfg.anchor_ratios[0])

        rpn_channel = cfg.rpn_channel
        self.in_features = cfg.rpn_in_features
        self.anchor_generator = layers.AnchorBoxGenerator(
            anchor_scales=cfg.anchor_scales,
            anchor_ratios=cfg.anchor_ratios,
            strides=cfg.rpn_stride,
            offset=self.cfg.anchor_offset,
        )
        # Assigns anchors to ground-truth boxes by IoU thresholds.
        self.matcher = layers.Matcher(
            cfg.match_thresholds, cfg.match_labels, cfg.match_allow_low_quality
        )

        # Shared 3x3 conv followed by two 1x1 heads: one objectness logit
        # per anchor and 4 box-offset values per anchor.
        self.rpn_conv = M.Conv2d(256, rpn_channel, kernel_size=3, stride=1, padding=1)
        self.rpn_cls_score = M.Conv2d(
            rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1
        )
        self.rpn_bbox_offsets = M.Conv2d(
            rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1
        )

        for l in [self.rpn_conv, self.rpn_cls_score, self.rpn_bbox_offsets]:
            M.init.normal_(l.weight, std=0.01)
            M.init.fill_(l.bias, 0)

    def forward(self, features, im_info, boxes=None):
        """Run the RPN.

        Args:
            features: dict of per-level feature maps; only the levels named
                in ``self.in_features`` are used.
            im_info: per-image meta tensor; column 4 is read as the number
                of valid ground-truth boxes per image.
            boxes: batched ground-truth boxes, required only in training.

        Returns:
            ``(rois, loss_dict)`` in training mode, otherwise just ``rois``.
        """
        # prediction
        features = [features[x] for x in self.in_features]
        # get anchors (one set per pyramid level)
        anchors_list = self.anchor_generator(features)

        pred_cls_logit_list = []
        pred_bbox_offset_list = []
        for x in features:
            t = F.relu(self.rpn_conv(x))
            scores = self.rpn_cls_score(t)
            # reshape (N, A, H, W): one objectness logit per cell anchor
            pred_cls_logit_list.append(
                scores.reshape(
                    scores.shape[0],
                    self.num_cell_anchors,
                    scores.shape[2],
                    scores.shape[3],
                )
            )
            bbox_offsets = self.rpn_bbox_offsets(t)
            # reshape (N, A, 4, H, W): 4 regression deltas per cell anchor
            pred_bbox_offset_list.append(
                bbox_offsets.reshape(
                    bbox_offsets.shape[0],
                    self.num_cell_anchors,
                    4,
                    bbox_offsets.shape[2],
                    bbox_offsets.shape[3],
                )
            )
        # get rois from the predictions
        rpn_rois = self.find_top_rpn_proposals(
            pred_cls_logit_list, pred_bbox_offset_list, anchors_list, im_info
        )
        if self.training:
            rpn_labels, rpn_offsets = self.get_ground_truth(
                anchors_list, boxes, im_info[:, 4].astype("int32")
            )
            pred_cls_logits, pred_bbox_offsets = self.merge_rpn_score_box(
                pred_cls_logit_list, pred_bbox_offset_list
            )

            # labels: >0 foreground, ==0 background, <0 ignored
            fg_mask = rpn_labels > 0
            valid_mask = rpn_labels >= 0
            num_valid = valid_mask.sum()

            # rpn classification loss over all non-ignored anchors
            loss_rpn_cls = F.loss.binary_cross_entropy(
                pred_cls_logits[valid_mask], rpn_labels[valid_mask]
            )

            # rpn regression loss over foreground anchors only,
            # normalized by the number of valid anchors (at least 1)
            loss_rpn_bbox = layers.smooth_l1_loss(
                pred_bbox_offsets[fg_mask],
                rpn_offsets[fg_mask],
                self.cfg.rpn_smooth_l1_beta,
            ).sum() / F.maximum(num_valid, 1)

            loss_dict = {"loss_rpn_cls": loss_rpn_cls, "loss_rpn_bbox": loss_rpn_bbox}
            return rpn_rois, loss_dict
        else:
            return rpn_rois

    def find_top_rpn_proposals(
        self, rpn_cls_score_list, rpn_bbox_offset_list, anchors_list, im_info
    ):
        """Decode predictions into boxes, keep the top-scoring ones per level,
        clip/filter them, and apply NMS across levels per image.

        Returns rois of shape (R, 5): [batch_idx, x1, y1, x2, y2], detached.
        """
        prev_nms_top_n = (
            self.cfg.train_prev_nms_top_n
            if self.training
            else self.cfg.test_prev_nms_top_n
        )
        post_nms_top_n = (
            self.cfg.train_post_nms_top_n
            if self.training
            else self.cfg.test_post_nms_top_n
        )

        return_rois = []

        for bid in range(im_info.shape[0]):
            batch_proposal_list = []
            batch_score_list = []
            batch_level_list = []
            for l, (rpn_cls_score, rpn_bbox_offset, anchors) in enumerate(
                zip(rpn_cls_score_list, rpn_bbox_offset_list, anchors_list)
            ):
                # get proposals and scores, flattened to per-anchor rows
                offsets = rpn_bbox_offset[bid].transpose(2, 3, 0, 1).reshape(-1, 4)
                proposals = self.box_coder.decode(anchors, offsets)

                scores = rpn_cls_score[bid].transpose(1, 2, 0).flatten()
                # NOTE(review): detach() is not in-place, so this statement
                # has no effect; presumably ``scores = scores.detach()`` was
                # intended. Harmless here since the returned rois are
                # detached below — confirm before changing.
                scores.detach()
                # prev nms top n: keep the highest-scoring proposals
                scores, order = F.topk(scores, descending=True, k=prev_nms_top_n)
                proposals = proposals[order]

                batch_proposal_list.append(proposals)
                batch_score_list.append(scores)
                # remember which pyramid level each proposal came from so
                # NMS can be batched by level
                batch_level_list.append(F.full_like(scores, l))

            # gather proposals, scores, level across all levels
            proposals = F.concat(batch_proposal_list, axis=0)
            scores = F.concat(batch_score_list, axis=0)
            levels = F.concat(batch_level_list, axis=0)

            proposals = layers.get_clipped_boxes(proposals, im_info[bid])
            # filter invalid proposals and apply total level nms
            keep_mask = layers.filter_boxes(proposals)
            proposals = proposals[keep_mask]
            scores = scores[keep_mask]
            levels = levels[keep_mask]
            nms_keep_inds = layers.batched_nms(
                proposals, scores, levels, self.cfg.rpn_nms_threshold, post_nms_top_n
            )

            # generate rois to rcnn head, rois shape (N, 5), info [batch_id, x1, y1, x2, y2]
            rois = F.concat([proposals, scores.reshape(-1, 1)], axis=1)
            rois = rois[nms_keep_inds]
            batch_inds = F.full((rois.shape[0], 1), bid)
            batch_rois = F.concat([batch_inds, rois[:, :4]], axis=1)
            return_rois.append(batch_rois)

        return_rois = F.concat(return_rois, axis=0)
        return return_rois.detach()

    def merge_rpn_score_box(self, rpn_cls_score_list, rpn_bbox_offset_list):
        """Flatten per-level score/offset maps into two flat tensors whose
        per-anchor row order matches the anchors produced by
        ``find_top_rpn_proposals`` / ``get_ground_truth``."""
        final_rpn_cls_score_list = []
        final_rpn_bbox_offset_list = []

        for bid in range(rpn_cls_score_list[0].shape[0]):
            batch_rpn_cls_score_list = []
            batch_rpn_bbox_offset_list = []
            for i in range(len(self.in_features)):
                # (A, H, W) -> (H, W, A) -> flat; offsets analogously to (-1, 4)
                rpn_cls_scores = rpn_cls_score_list[i][bid].transpose(1, 2, 0).flatten()
                rpn_bbox_offsets = (
                    rpn_bbox_offset_list[i][bid].transpose(2, 3, 0, 1).reshape(-1, 4)
                )

                batch_rpn_cls_score_list.append(rpn_cls_scores)
                batch_rpn_bbox_offset_list.append(rpn_bbox_offsets)

            batch_rpn_cls_scores = F.concat(batch_rpn_cls_score_list, axis=0)
            batch_rpn_bbox_offsets = F.concat(batch_rpn_bbox_offset_list, axis=0)

            final_rpn_cls_score_list.append(batch_rpn_cls_scores)
            final_rpn_bbox_offset_list.append(batch_rpn_bbox_offsets)

        final_rpn_cls_scores = F.concat(final_rpn_cls_score_list, axis=0)
        final_rpn_bbox_offsets = F.concat(final_rpn_bbox_offset_list, axis=0)
        return final_rpn_cls_scores, final_rpn_bbox_offsets

    def get_ground_truth(self, anchors_list, batched_gt_boxes, batched_num_gts):
        """Assign each anchor a label and regression target per image.

        Returns detached ``(labels, offsets)`` concatenated over the batch.
        """
        anchors = F.concat(anchors_list, axis=0)
        labels_list = []
        offsets_list = []

        for bid in range(batched_gt_boxes.shape[0]):
            # only the first batched_num_gts[bid] rows are real boxes
            gt_boxes = batched_gt_boxes[bid, :batched_num_gts[bid]]

            overlaps = layers.get_iou(gt_boxes[:, :4], anchors)
            matched_indices, labels = self.matcher(overlaps)
            offsets = self.box_coder.encode(anchors, gt_boxes[matched_indices, :4])

            # sample positive labels; surplus positives are set to ignore (-1)
            num_positive = int(self.cfg.num_sample_anchors * self.cfg.positive_anchor_ratio)
            labels = layers.sample_labels(labels, num_positive, 1, -1)
            # sample negative labels to fill the remaining sample budget
            num_positive = (labels == 1).sum().astype("int32")
            num_negative = self.cfg.num_sample_anchors - num_positive
            labels = layers.sample_labels(labels, num_negative, 0, -1)

            labels_list.append(labels)
            offsets_list.append(offsets)

        return (
            F.concat(labels_list, axis=0).detach(),
            F.concat(offsets_list, axis=0).detach(),
        )
|
[
"megengine.functional.maximum",
"megengine.module.init.normal_",
"megengine.functional.full",
"megengine.functional.topk",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.functional.loss.binary_cross_entropy",
"megengine.module.init.fill_",
"megengine.functional.full_like"
] |
[((580, 630), 'layers.BoxCoder', 'layers.BoxCoder', (['cfg.rpn_reg_mean', 'cfg.rpn_reg_std'], {}), '(cfg.rpn_reg_mean, cfg.rpn_reg_std)\n', (595, 630), False, 'import layers\n'), ((997, 1148), 'layers.AnchorBoxGenerator', 'layers.AnchorBoxGenerator', ([], {'anchor_scales': 'cfg.anchor_scales', 'anchor_ratios': 'cfg.anchor_ratios', 'strides': 'cfg.rpn_stride', 'offset': 'self.cfg.anchor_offset'}), '(anchor_scales=cfg.anchor_scales, anchor_ratios=\n cfg.anchor_ratios, strides=cfg.rpn_stride, offset=self.cfg.anchor_offset)\n', (1022, 1148), False, 'import layers\n'), ((1227, 1315), 'layers.Matcher', 'layers.Matcher', (['cfg.match_thresholds', 'cfg.match_labels', 'cfg.match_allow_low_quality'], {}), '(cfg.match_thresholds, cfg.match_labels, cfg.\n match_allow_low_quality)\n', (1241, 1315), False, 'import layers\n'), ((1358, 1420), 'megengine.module.Conv2d', 'M.Conv2d', (['(256)', 'rpn_channel'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(256, rpn_channel, kernel_size=3, stride=1, padding=1)\n', (1366, 1420), True, 'import megengine.module as M\n'), ((1450, 1519), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', 'self.num_cell_anchors'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, self.num_cell_anchors, kernel_size=1, stride=1)\n', (1458, 1519), True, 'import megengine.module as M\n'), ((1574, 1647), 'megengine.module.Conv2d', 'M.Conv2d', (['rpn_channel', '(self.num_cell_anchors * 4)'], {'kernel_size': '(1)', 'stride': '(1)'}), '(rpn_channel, self.num_cell_anchors * 4, kernel_size=1, stride=1)\n', (1582, 1647), True, 'import megengine.module as M\n'), ((6539, 6568), 'megengine.functional.concat', 'F.concat', (['return_rois'], {'axis': '(0)'}), '(return_rois, axis=0)\n', (6547, 6568), True, 'import megengine.functional as F\n'), ((7651, 7693), 'megengine.functional.concat', 'F.concat', (['final_rpn_cls_score_list'], {'axis': '(0)'}), '(final_rpn_cls_score_list, axis=0)\n', (7659, 7693), True, 'import megengine.functional as F\n'), 
((7727, 7771), 'megengine.functional.concat', 'F.concat', (['final_rpn_bbox_offset_list'], {'axis': '(0)'}), '(final_rpn_bbox_offset_list, axis=0)\n', (7735, 7771), True, 'import megengine.functional as F\n'), ((7932, 7962), 'megengine.functional.concat', 'F.concat', (['anchors_list'], {'axis': '(0)'}), '(anchors_list, axis=0)\n', (7940, 7962), True, 'import megengine.functional as F\n'), ((1760, 1794), 'megengine.module.init.normal_', 'M.init.normal_', (['l.weight'], {'std': '(0.01)'}), '(l.weight, std=0.01)\n', (1774, 1794), True, 'import megengine.module as M\n'), ((1807, 1830), 'megengine.module.init.fill_', 'M.init.fill_', (['l.bias', '(0)'], {}), '(l.bias, 0)\n', (1819, 1830), True, 'import megengine.module as M\n'), ((3520, 3605), 'megengine.functional.loss.binary_cross_entropy', 'F.loss.binary_cross_entropy', (['pred_cls_logits[valid_mask]', 'rpn_labels[valid_mask]'], {}), '(pred_cls_logits[valid_mask], rpn_labels[valid_mask]\n )\n', (3547, 3605), True, 'import megengine.functional as F\n'), ((5526, 5563), 'megengine.functional.concat', 'F.concat', (['batch_proposal_list'], {'axis': '(0)'}), '(batch_proposal_list, axis=0)\n', (5534, 5563), True, 'import megengine.functional as F\n'), ((5585, 5619), 'megengine.functional.concat', 'F.concat', (['batch_score_list'], {'axis': '(0)'}), '(batch_score_list, axis=0)\n', (5593, 5619), True, 'import megengine.functional as F\n'), ((5641, 5675), 'megengine.functional.concat', 'F.concat', (['batch_level_list'], {'axis': '(0)'}), '(batch_level_list, axis=0)\n', (5649, 5675), True, 'import megengine.functional as F\n'), ((5701, 5750), 'layers.get_clipped_boxes', 'layers.get_clipped_boxes', (['proposals', 'im_info[bid]'], {}), '(proposals, im_info[bid])\n', (5725, 5750), False, 'import layers\n'), ((5840, 5870), 'layers.filter_boxes', 'layers.filter_boxes', (['proposals'], {}), '(proposals)\n', (5859, 5870), False, 'import layers\n'), ((6022, 6115), 'layers.batched_nms', 'layers.batched_nms', (['proposals', 'scores', 
'levels', 'self.cfg.rpn_nms_threshold', 'post_nms_top_n'], {}), '(proposals, scores, levels, self.cfg.rpn_nms_threshold,\n post_nms_top_n)\n', (6040, 6115), False, 'import layers\n'), ((6372, 6403), 'megengine.functional.full', 'F.full', (['(rois.shape[0], 1)', 'bid'], {}), '((rois.shape[0], 1), bid)\n', (6378, 6403), True, 'import megengine.functional as F\n'), ((6429, 6472), 'megengine.functional.concat', 'F.concat', (['[batch_inds, rois[:, :4]]'], {'axis': '(1)'}), '([batch_inds, rois[:, :4]], axis=1)\n', (6437, 6472), True, 'import megengine.functional as F\n'), ((7357, 7399), 'megengine.functional.concat', 'F.concat', (['batch_rpn_cls_score_list'], {'axis': '(0)'}), '(batch_rpn_cls_score_list, axis=0)\n', (7365, 7399), True, 'import megengine.functional as F\n'), ((7437, 7481), 'megengine.functional.concat', 'F.concat', (['batch_rpn_bbox_offset_list'], {'axis': '(0)'}), '(batch_rpn_bbox_offset_list, axis=0)\n', (7445, 7481), True, 'import megengine.functional as F\n'), ((8160, 8200), 'layers.get_iou', 'layers.get_iou', (['gt_boxes[:, :4]', 'anchors'], {}), '(gt_boxes[:, :4], anchors)\n', (8174, 8200), False, 'import layers\n'), ((8499, 8548), 'layers.sample_labels', 'layers.sample_labels', (['labels', 'num_positive', '(1)', '(-1)'], {}), '(labels, num_positive, 1, -1)\n', (8519, 8548), False, 'import layers\n'), ((8740, 8789), 'layers.sample_labels', 'layers.sample_labels', (['labels', 'num_negative', '(0)', '(-1)'], {}), '(labels, num_negative, 0, -1)\n', (8760, 8789), False, 'import layers\n'), ((3866, 3889), 'megengine.functional.maximum', 'F.maximum', (['num_valid', '(1)'], {}), '(num_valid, 1)\n', (3875, 3889), True, 'import megengine.functional as F\n'), ((5193, 5242), 'megengine.functional.topk', 'F.topk', (['scores'], {'descending': '(True)', 'k': 'prev_nms_top_n'}), '(scores, descending=True, k=prev_nms_top_n)\n', (5199, 5242), True, 'import megengine.functional as F\n'), ((5431, 5453), 'megengine.functional.full_like', 'F.full_like', (['scores', 
'l'], {}), '(scores, l)\n', (5442, 5453), True, 'import megengine.functional as F\n'), ((8901, 8930), 'megengine.functional.concat', 'F.concat', (['labels_list'], {'axis': '(0)'}), '(labels_list, axis=0)\n', (8909, 8930), True, 'import megengine.functional as F\n'), ((8953, 8983), 'megengine.functional.concat', 'F.concat', (['offsets_list'], {'axis': '(0)'}), '(offsets_list, axis=0)\n', (8961, 8983), True, 'import megengine.functional as F\n'), ((3694, 3798), 'layers.smooth_l1_loss', 'layers.smooth_l1_loss', (['pred_bbox_offsets[fg_mask]', 'rpn_offsets[fg_mask]', 'self.cfg.rpn_smooth_l1_beta'], {}), '(pred_bbox_offsets[fg_mask], rpn_offsets[fg_mask],\n self.cfg.rpn_smooth_l1_beta)\n', (3715, 3798), False, 'import layers\n')]
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 coolbeam
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.module as nn
import megengine.functional as F
from common.utils import flow_warp, upsample2d_flow_as
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True, if_IN=False, IN_affine=False, if_BN=False):
    """Build a conv block: Conv2d, optionally followed by LeakyReLU(0.1) and a
    normalization layer.

    Args:
        in_planes: input channel count.
        out_planes: output channel count.
        kernel_size, stride, dilation: standard conv parameters; padding is
            chosen as ``((kernel_size - 1) * dilation) // 2`` to keep the
            spatial size at stride 1.
        isReLU: append a LeakyReLU(0.1) after the conv.
        if_IN: append InstanceNorm(out_planes); takes precedence over if_BN.
        IN_affine: ``affine`` flag forwarded to the norm layer.
        if_BN: append BatchNorm2d(out_planes) when if_IN is False.

    Returns:
        an ``nn.Sequential`` in the order conv -> [LeakyReLU] -> [norm].
    """
    # The original implementation spelled out six branches that differed only
    # in which trailing layers were appended; build the list once instead.
    modules = [
        nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            stride=stride,
            dilation=dilation,
            padding=((kernel_size - 1) * dilation) // 2,
            bias=True,
        )
    ]
    if isReLU:
        modules.append(nn.LeakyReLU(0.1))
    if if_IN:
        modules.append(nn.InstanceNorm(out_planes, affine=IN_affine))
    elif if_BN:
        modules.append(nn.BatchNorm2d(out_planes, affine=IN_affine))
    return nn.Sequential(*modules)
class FlowEstimatorDense_temp(nn.Module):
    """Densely connected flow estimator.

    Five conv layers, each consuming the concatenation of its input with its
    own output, followed by a final conv (no activation) that predicts
    ``ch_out`` channels. ``num_feature_channel`` records the channel count of
    the dense feature returned alongside the prediction.
    """

    def __init__(self, ch_in=64, f_channels=(128, 128, 96, 64, 32, 32), ch_out=2):
        super(FlowEstimatorDense_temp, self).__init__()
        # Running channel count of the densely concatenated feature: it grows
        # by f_channels[i] after every layer.
        num_ch = ch_in
        self.conv1 = conv(num_ch, f_channels[0])
        num_ch += f_channels[0]
        self.conv2 = conv(num_ch, f_channels[1])
        num_ch += f_channels[1]
        self.conv3 = conv(num_ch, f_channels[2])
        num_ch += f_channels[2]
        self.conv4 = conv(num_ch, f_channels[3])
        num_ch += f_channels[3]
        self.conv5 = conv(num_ch, f_channels[4])
        num_ch += f_channels[4]
        self.num_feature_channel = num_ch
        self.conv_last = conv(num_ch, ch_out, isReLU=False)

    def forward(self, x):
        """Return ``(dense_feature, prediction)`` for input ``x``."""
        feat = x
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            # dense connectivity: stack the new output on top of everything so far
            feat = F.concat([layer(feat), feat], axis=1)
        return feat, self.conv_last(feat)
class FlowMaskEstimator(FlowEstimatorDense_temp):
    """Thin alias of FlowEstimatorDense_temp, used when the last channel of
    the prediction is interpreted as a mask rather than flow."""

    def __init__(self, ch_in, f_channels, ch_out):
        super().__init__(ch_in=ch_in, f_channels=f_channels, ch_out=ch_out)
class NeuralUpsampler(nn.Module):
    """Learned flow upsampler.

    Refines an initial (coarse) flow at the resolution of the given feature
    maps by predicting a residual inter-frame flow and a blending mask, then
    fuses the warped flow with the initial flow.
    """

    def __init__(self):
        super(NeuralUpsampler, self).__init__()
        estimator_channels = (32, 32, 32, 16, 8)
        feature_channels = 64
        # Predicts 3 channels: 2 for the residual flow, 1 for the mask logit.
        self.dense_estimator_mask = FlowEstimatorDense_temp(feature_channels, f_channels=estimator_channels, ch_out=3)
        self.upsample_output_conv = nn.Sequential(
            conv(3, 16, kernel_size=3, stride=1, dilation=1),
            conv(16, 16, stride=2),
            conv(16, 32, kernel_size=3, stride=1, dilation=1),
            conv(32, 32, stride=2),
        )

    def forward(self, flow_init, feature_1, feature_2, output_level_flow=None):
        _, _, h, w = flow_init.shape
        _, _, h_f, w_f = feature_1.shape
        if h != h_f or w != w_f:
            # Bring the coarse flow up to the feature resolution; flow values
            # scale with spatial size, hence the extra factor of 2.
            flow_init = F.vision.interpolate(flow_init, scale_factor=2., mode='bilinear', align_corners=True) * 2
        warped_feature_2 = flow_warp(feature_2, flow_init)
        _, prediction = self.dense_estimator_mask(F.concat((feature_1, warped_feature_2), axis=1))
        inter_flow = prediction[:, :2, :, :]
        inter_mask = F.sigmoid(F.expand_dims(prediction[:, 2, :, :], 1))
        if output_level_flow is not None:
            # Resample both outputs to the requested resolution and blend
            # against that flow instead.
            inter_flow = upsample2d_flow_as(inter_flow, output_level_flow, mode="bilinear", if_rate=True)
            inter_mask = upsample2d_flow_as(inter_mask, output_level_flow, mode="bilinear")
            flow_init = output_level_flow
        refined = flow_warp(flow_init, inter_flow)
        # Mask-weighted blend of the refined and the initial flow.
        return refined * (1 - inter_mask) + flow_init * inter_mask

    def output_conv(self, x):
        """Apply the standalone downsampling conv stack to ``x``."""
        return self.upsample_output_conv(x)
|
[
"megengine.functional.sigmoid",
"megengine.functional.expand_dims",
"megengine.module.BatchNorm2d",
"megengine.functional.vision.interpolate",
"megengine.module.InstanceNorm",
"megengine.functional.concat",
"megengine.module.Conv2d",
"megengine.module.LeakyReLU"
] |
[((5825, 5856), 'common.utils.flow_warp', 'flow_warp', (['feature_2', 'flow_init'], {}), '(feature_2, flow_init)\n', (5834, 5856), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((5881, 5926), 'megengine.functional.concat', 'F.concat', (['(feature_1, feature_2_warp)'], {'axis': '(1)'}), '((feature_1, feature_2_warp), axis=1)\n', (5889, 5926), True, 'import megengine.functional as F\n'), ((6088, 6116), 'megengine.functional.expand_dims', 'F.expand_dims', (['inter_mask', '(1)'], {}), '(inter_mask, 1)\n', (6101, 6116), True, 'import megengine.functional as F\n'), ((6138, 6159), 'megengine.functional.sigmoid', 'F.sigmoid', (['inter_mask'], {}), '(inter_mask)\n', (6147, 6159), True, 'import megengine.functional as F\n'), ((6228, 6313), 'common.utils.upsample2d_flow_as', 'upsample2d_flow_as', (['inter_flow', 'output_level_flow'], {'mode': '"""bilinear"""', 'if_rate': '(True)'}), "(inter_flow, output_level_flow, mode='bilinear', if_rate=True\n )\n", (6246, 6313), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((6334, 6400), 'common.utils.upsample2d_flow_as', 'upsample2d_flow_as', (['inter_mask', 'output_level_flow'], {'mode': '"""bilinear"""'}), "(inter_mask, output_level_flow, mode='bilinear')\n", (6352, 6400), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((1202, 1351), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (1211, 1351), True, 'import megengine.module as nn\n'), ((1507, 1524), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1519, 1524), True, 'import megengine.module as nn\n'), ((1526, 1571), 'megengine.module.InstanceNorm', 'nn.InstanceNorm', (['out_planes'], 
{'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (1541, 1571), True, 'import megengine.module as nn\n'), ((2479, 2628), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2488, 2628), True, 'import megengine.module as nn\n'), ((2784, 2829), 'megengine.module.InstanceNorm', 'nn.InstanceNorm', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (2799, 2829), True, 'import megengine.module as nn\n'), ((5710, 5800), 'megengine.functional.vision.interpolate', 'F.vision.interpolate', (['flow_init'], {'scale_factor': '(2.0)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(flow_init, scale_factor=2.0, mode='bilinear',\n align_corners=True)\n", (5730, 5800), True, 'import megengine.functional as F\n'), ((6462, 6494), 'common.utils.flow_warp', 'flow_warp', (['flow_init', 'inter_flow'], {}), '(flow_init, inter_flow)\n', (6471, 6494), False, 'from common.utils import flow_warp, upsample2d_flow_as\n'), ((1643, 1792), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (1652, 1792), True, 'import megengine.module as nn\n'), ((1948, 1965), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (1960, 1965), True, 'import megengine.module as nn\n'), ((1967, 2011), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (1981, 2011), 
True, 'import megengine.module as nn\n'), ((2077, 2226), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2086, 2226), True, 'import megengine.module as nn\n'), ((2382, 2399), 'megengine.module.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (2394, 2399), True, 'import megengine.module as nn\n'), ((2901, 3050), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (2910, 3050), True, 'import megengine.module as nn\n'), ((3206, 3250), 'megengine.module.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {'affine': 'IN_affine'}), '(out_planes, affine=IN_affine)\n', (3220, 3250), True, 'import megengine.module as nn\n'), ((3316, 3465), 'megengine.module.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'dilation': 'dilation', 'padding': '((kernel_size - 1) * dilation // 2)', 'bias': '(True)'}), '(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n dilation=dilation, padding=(kernel_size - 1) * dilation // 2, bias=True)\n', (3325, 3465), True, 'import megengine.module as nn\n')]
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import os
import cv2
import megengine as mge
import megengine.data as data
import megengine.data.dataset as dataset
import megengine.data.transform as T
import megengine.jit as jit
import numpy as np
from tqdm import tqdm
from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus
from official.vision.segmentation.utils import import_config_from_file
def main():
    """Entry point: evaluate a DeepLabV3Plus checkpoint on a validation set.

    Parses CLI args (-c config, -d dataset dir, -m model path), builds the
    validation loader, loads the checkpoint, runs multi-scale inference on
    every image, then optionally saves masks and prints IoU metrics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--config", type=str, required=True, help="configuration file"
    )
    parser.add_argument(
        "-d", "--dataset_dir", type=str, default="/data/datasets/VOC2012",
    )
    parser.add_argument(
        "-m", "--model_path", type=str, default=None, help="eval model file"
    )
    args = parser.parse_args()
    cfg = import_config_from_file(args.config)
    test_loader, test_size = build_dataloader(args.dataset_dir, cfg)
    print("number of test images: %d" % (test_size))
    net = DeepLabV3Plus(class_num=cfg.NUM_CLASSES)
    model_dict = mge.load(args.model_path)
    net.load_state_dict(model_dict["state_dict"])
    print("load model %s" % (args.model_path))
    net.eval()
    result_list = []
    for sample_batched in tqdm(test_loader):
        img = sample_batched[0].squeeze()
        label = sample_batched[1].squeeze()
        im_info = sample_batched[2]
        # Multi-scale (and optionally sliding-window / flipped) prediction.
        pred = evaluate(net, img, cfg)
        result_list.append({"pred": pred, "gt": label, "name":im_info[2]})
    if cfg.VAL_SAVE:
        save_results(result_list, cfg.VAL_SAVE, cfg)
    compute_metric(result_list, cfg)
## inference one image
def pad_image_to_shape(img, shape, border_mode, value):
    """Pad ``img`` so that it is at least ``shape`` = (H, W).

    The padding for each axis is split as evenly as possible between the
    two sides, with the extra pixel (for odd totals) going to the
    bottom/right side.

    Returns:
        (padded_img, margin) where ``margin`` is a uint32 array of
        [top, bottom, left, right] pad widths, used by callers to crop
        predictions back to the unpadded region.
    """
    pad_h = max(shape[0] - img.shape[0], 0)
    pad_w = max(shape[1] - img.shape[1], 0)
    margin = np.array(
        [pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2],
        np.uint32,
    )
    padded = cv2.copyMakeBorder(
        img, margin[0], margin[1], margin[2], margin[3], border_mode, value=value
    )
    return padded, margin
def eval_single(net, img, is_flip):
    """Run the network once on an (H, W, C) image.

    If ``is_flip`` is true, also runs on the horizontally flipped image and
    averages the un-flipped result. Returns an (H, W, num_classes) score map.
    """
    @jit.trace(symbolic=True, opt_level=2)
    def pred_fun(data, net=None):
        net.eval()
        pred = net(data)
        return pred
    data = mge.tensor()
    # HWC -> NCHW with a leading batch axis of 1.
    data.set_value(img.transpose(2, 0, 1)[np.newaxis])
    pred = pred_fun(data, net=net)
    if is_flip:
        img_flip = img[:, ::-1, :]
        data.set_value(img_flip.transpose(2, 0, 1)[np.newaxis])
        pred_flip = pred_fun(data, net=net)
        # Flip the flipped prediction back before averaging.
        pred = (pred + pred_flip[:, :, :, ::-1]) / 2.0
        del pred_flip
    pred = pred.numpy().squeeze().transpose(1, 2, 0)
    del data
    return pred
def evaluate(net, img, cfg):
    """Multi-scale (and optionally sliding-window) inference for one image.

    For each scale in ``cfg.VAL_MULTISCALE`` the image is resized,
    predicted (optionally averaged with its horizontal flip), resized back
    to the original resolution and accumulated; the per-pixel argmax over
    classes of the accumulated scores is returned as a uint8 label map.
    """
    ori_h, ori_w, _ = img.shape
    pred_all = np.zeros((ori_h, ori_w, cfg.NUM_CLASSES))
    for rate in cfg.VAL_MULTISCALE:
        if cfg.VAL_SLIP:
            # Sliding-window mode: scale the image, keep a fixed crop size.
            new_h, new_w = int(ori_h*rate), int(ori_w*rate)
            val_size = (cfg.VAL_HEIGHT, cfg.VAL_WIDTH)
        else:
            # Whole-image mode: the crop size is the scaled size itself.
            new_h, new_w = int(cfg.VAL_HEIGHT*rate), int(cfg.VAL_WIDTH*rate)
            val_size = (new_h, new_w)
        img_scale = cv2.resize(
            img, (new_w, new_h), interpolation=cv2.INTER_LINEAR
        )
        # NOTE(review): this compares new_h twice; `new_w <= val_size[1]`
        # was probably intended — confirm before relying on this branch.
        if (new_h <= val_size[0]) and (new_h <= val_size[1]):
            # Image fits in one window: pad to window size, predict, crop
            # the prediction back using the recorded margins.
            img_pad, margin = pad_image_to_shape(
                img_scale, val_size, cv2.BORDER_CONSTANT, value=0
            )
            pred = eval_single(net, img_pad, cfg.VAL_FLIP)
            pred = pred[
                margin[0] : (pred.shape[0] - margin[1]),
                margin[2] : (pred.shape[1] - margin[3]),
                :,
            ]
        else:
            # Image larger than the window: slide overlapping crops with a
            # 2/3-window stride and sum the per-crop predictions.
            stride_rate = 2 / 3
            stride = [int(np.ceil(i * stride_rate)) for i in val_size]
            img_pad, margin = pad_image_to_shape(
                img_scale, val_size, cv2.BORDER_CONSTANT, value=0
            )
            pad_h, pad_w = img_pad.shape[:2]
            r_grid, c_grid = [
                int(np.ceil((ps - cs) / stride)) + 1
                for ps, cs, stride in zip(img_pad.shape, val_size, stride)
            ]
            pred_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
            count_scale = np.zeros((pad_h, pad_w, cfg.NUM_CLASSES))
            for grid_yidx in range(r_grid):
                for grid_xidx in range(c_grid):
                    s_x = grid_xidx * stride[1]
                    s_y = grid_yidx * stride[0]
                    e_x = min(s_x + val_size[1], pad_w)
                    e_y = min(s_y + val_size[0], pad_h)
                    # Clamp the last window to the image border by moving
                    # its start back, keeping a full-size crop.
                    s_x = e_x - val_size[1]
                    s_y = e_y - val_size[0]
                    img_sub = img_pad[s_y:e_y, s_x:e_x, :]
                    tpred = eval_single(net, img_sub, cfg.VAL_FLIP)
                    count_scale[s_y:e_y, s_x:e_x, :] += 1
                    pred_scale[s_y:e_y, s_x:e_x, :] += tpred
            #pred_scale = pred_scale / count_scale
            pred = pred_scale[
                margin[0] : (pred_scale.shape[0] - margin[1]),
                margin[2] : (pred_scale.shape[1] - margin[3]),
                :,
            ]
        pred = cv2.resize(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)
        pred_all = pred_all + pred
    #pred_all = pred_all / len(cfg.VAL_MULTISCALE)
    result = np.argmax(pred_all, axis=2).astype(np.uint8)
    return result
def save_results(result_list, save_dir, cfg):
    """Write predicted and ground-truth masks to ``save_dir`` as PNG files.

    Args:
        result_list: list of dicts with "pred", "gt" and "name" keys, as
            produced by the evaluation loop in ``main``.
        save_dir: output directory; created if missing.
        cfg: config object; ``cfg.DATASET`` selects how names are derived.
    """
    # exist_ok avoids the race between the old exists() check and makedirs().
    os.makedirs(save_dir, exist_ok=True)
    for sample in result_list:
        if cfg.DATASET == "Cityscapes":
            # Cityscapes names are relative paths ending in ".png"; keep the stem.
            name = sample["name"].split('/')[-1][:-4]
        else:
            name = sample["name"]
        file_path = os.path.join(save_dir, "%s.png"%name)
        cv2.imwrite(file_path, sample["pred"])
        file_path = os.path.join(save_dir, "%s.gt.png"%name)
        cv2.imwrite(file_path, sample["gt"])
# voc cityscapes metric
def compute_metric(result_list, cfg):
    """Compute and print segmentation metrics from accumulated results.

    Builds a class_num x class_num confusion matrix over all images, then
    derives per-class IoU, mean IoU, frequency-weighted IoU and mean pixel
    accuracy; prints a per-class table and returns the mean IoU.
    """
    class_num = cfg.NUM_CLASSES
    hist = np.zeros((class_num, class_num))
    correct = 0
    labeled = 0
    count = 0
    for idx in range(len(result_list)):
        pred = result_list[idx]['pred']
        gt = result_list[idx]['gt']
        assert(pred.shape == gt.shape)
        # Valid pixels only: ignore labels outside [0, class_num), e.g. 255.
        k = (gt>=0) & (gt<class_num)
        labeled += np.sum(k)
        correct += np.sum((pred[k]==gt[k]))
        # Accumulate the confusion matrix: row = ground truth, col = prediction.
        hist += np.bincount(class_num * gt[k].astype(int) + pred[k].astype(int), minlength=class_num**2).reshape(class_num, class_num)
        count += 1
    # Per-class IoU = diag / (row sum + col sum - diag).
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    mean_IU = np.nanmean(iu)
    # NOTE(review): mean_IU_no_back, freq_IU and count are computed but never
    # used below — presumably kept for debugging; confirm before removing.
    mean_IU_no_back = np.nanmean(iu[1:])
    freq = hist.sum(1) / hist.sum()
    freq_IU = (iu[freq > 0] * freq[freq >0]).sum()
    mean_pixel_acc = correct / labeled
    if cfg.DATASET == "VOC2012":
        class_names = ("background", ) + dataset.PascalVOC.class_names
    elif cfg.DATASET == "Cityscapes":
        class_names = dataset.Cityscapes.class_names
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
    n = iu.size
    lines = []
    for i in range(n):
        if class_names is None:
            cls = 'Class %d:' % (i+1)
        else:
            cls = '%d %s' % (i+1, class_names[i])
        lines.append('%-8s\t%.3f%%' % (cls, iu[i] * 100))
    lines.append('---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%' % ('mean_IU', mean_IU * 100,'mean_pixel_ACC',mean_pixel_acc*100))
    line = "\n".join(lines)
    print(line)
    return mean_IU
class EvalPascalVOC(dataset.PascalVOC):
    """PascalVOC variant that decodes color-coded masks into label maps."""

    def _trans_mask(self, mask):
        """Map each BGR palette color in ``mask`` to its class index.

        Pixels matching no palette entry keep the ignore value 255.
        """
        label = np.full(mask.shape[:2], 255.0)
        # Palette index 0 is background (black); class colors follow.
        palette = [[0, 0, 0]] + list(self.class_colors)
        for idx, (b, g, r) in enumerate(palette):
            hit = (mask[:, :, 0] == b) & (mask[:, :, 1] == g) & (mask[:, :, 2] == r)
            label[hit] = idx
        return label.astype(np.uint8)
def build_dataloader(dataset_dir, cfg):
    """Build the validation DataLoader for the dataset named in cfg.DATASET.

    Returns:
        (dataloader, dataset_size) for sequential, normalized validation
        batches of (image, mask, info) samples.
    """
    if cfg.DATASET == "VOC2012":
        val_dataset = EvalPascalVOC(
            dataset_dir, "val", order=["image", "mask", "info"]
        )
    elif cfg.DATASET == "Cityscapes":
        val_dataset = dataset.Cityscapes(
            dataset_dir, "val", mode='gtFine', order=["image", "mask", "info"]
        )
    else:
        raise ValueError("Unsupported dataset {}".format(cfg.DATASET))
    sampler = data.SequentialSampler(val_dataset, cfg.VAL_BATCHES)
    loader = data.DataLoader(
        val_dataset,
        sampler=sampler,
        transform=T.Normalize(
            mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=["image", "mask"]
        ),
        num_workers=cfg.DATA_WORKERS,
    )
    return loader, len(val_dataset)
if __name__ == "__main__":
main()
|
[
"megengine.data.transform.Normalize",
"megengine.data.SequentialSampler",
"megengine.jit.trace",
"megengine.tensor",
"megengine.data.dataset.Cityscapes",
"megengine.load"
] |
[((813, 838), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (836, 838), False, 'import argparse\n'), ((1203, 1239), 'official.vision.segmentation.utils.import_config_from_file', 'import_config_from_file', (['args.config'], {}), '(args.config)\n', (1226, 1239), False, 'from official.vision.segmentation.utils import import_config_from_file\n'), ((1373, 1413), 'official.vision.segmentation.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'class_num': 'cfg.NUM_CLASSES'}), '(class_num=cfg.NUM_CLASSES)\n', (1386, 1413), False, 'from official.vision.segmentation.deeplabv3plus import DeepLabV3Plus\n'), ((1431, 1456), 'megengine.load', 'mge.load', (['args.model_path'], {}), '(args.model_path)\n', (1439, 1456), True, 'import megengine as mge\n'), ((1618, 1635), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (1622, 1635), False, 'from tqdm import tqdm\n'), ((2078, 2100), 'numpy.zeros', 'np.zeros', (['(4)', 'np.uint32'], {}), '(4, np.uint32)\n', (2086, 2100), True, 'import numpy as np\n'), ((2427, 2524), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'margin[0]', 'margin[1]', 'margin[2]', 'margin[3]', 'border_mode'], {'value': 'value'}), '(img, margin[0], margin[1], margin[2], margin[3],\n border_mode, value=value)\n', (2445, 2524), False, 'import cv2\n'), ((2601, 2638), 'megengine.jit.trace', 'jit.trace', ([], {'symbolic': '(True)', 'opt_level': '(2)'}), '(symbolic=True, opt_level=2)\n', (2610, 2638), True, 'import megengine.jit as jit\n'), ((2749, 2761), 'megengine.tensor', 'mge.tensor', ([], {}), '()\n', (2759, 2761), True, 'import megengine as mge\n'), ((3248, 3289), 'numpy.zeros', 'np.zeros', (['(ori_h, ori_w, cfg.NUM_CLASSES)'], {}), '((ori_h, ori_w, cfg.NUM_CLASSES))\n', (3256, 3289), True, 'import numpy as np\n'), ((6466, 6498), 'numpy.zeros', 'np.zeros', (['(class_num, class_num)'], {}), '((class_num, class_num))\n', (6474, 6498), True, 'import numpy as np\n'), ((7061, 7075), 'numpy.nanmean', 'np.nanmean', (['iu'], 
{}), '(iu)\n', (7071, 7075), True, 'import numpy as np\n'), ((7098, 7116), 'numpy.nanmean', 'np.nanmean', (['iu[1:]'], {}), '(iu[1:])\n', (7108, 7116), True, 'import numpy as np\n'), ((8939, 8991), 'megengine.data.SequentialSampler', 'data.SequentialSampler', (['val_dataset', 'cfg.VAL_BATCHES'], {}), '(val_dataset, cfg.VAL_BATCHES)\n', (8961, 8991), True, 'import megengine.data as data\n'), ((3615, 3678), 'cv2.resize', 'cv2.resize', (['img', '(new_w, new_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)\n', (3625, 3678), False, 'import cv2\n'), ((5617, 5681), 'cv2.resize', 'cv2.resize', (['pred', '(ori_w, ori_h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(pred, (ori_w, ori_h), interpolation=cv2.INTER_LINEAR)\n', (5627, 5681), False, 'import cv2\n'), ((5904, 5928), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (5918, 5928), False, 'import os\n'), ((5938, 5959), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5949, 5959), False, 'import os\n'), ((6169, 6208), 'os.path.join', 'os.path.join', (['save_dir', "('%s.png' % name)"], {}), "(save_dir, '%s.png' % name)\n", (6181, 6208), False, 'import os\n'), ((6215, 6253), 'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['pred']"], {}), "(file_path, sample['pred'])\n", (6226, 6253), False, 'import cv2\n'), ((6274, 6316), 'os.path.join', 'os.path.join', (['save_dir', "('%s.gt.png' % name)"], {}), "(save_dir, '%s.gt.png' % name)\n", (6286, 6316), False, 'import os\n'), ((6323, 6359), 'cv2.imwrite', 'cv2.imwrite', (['file_path', "sample['gt']"], {}), "(file_path, sample['gt'])\n", (6334, 6359), False, 'import cv2\n'), ((6756, 6765), 'numpy.sum', 'np.sum', (['k'], {}), '(k)\n', (6762, 6765), True, 'import numpy as np\n'), ((6785, 6809), 'numpy.sum', 'np.sum', (['(pred[k] == gt[k])'], {}), '(pred[k] == gt[k])\n', (6791, 6809), True, 'import numpy as np\n'), ((6985, 6998), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (6992, 
6998), True, 'import numpy as np\n'), ((4616, 4657), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4624, 4657), True, 'import numpy as np\n'), ((4684, 4725), 'numpy.zeros', 'np.zeros', (['(pad_h, pad_w, cfg.NUM_CLASSES)'], {}), '((pad_h, pad_w, cfg.NUM_CLASSES))\n', (4692, 4725), True, 'import numpy as np\n'), ((5782, 5809), 'numpy.argmax', 'np.argmax', (['pred_all'], {'axis': '(2)'}), '(pred_all, axis=2)\n', (5791, 5809), True, 'import numpy as np\n'), ((7030, 7043), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (7037, 7043), True, 'import numpy as np\n'), ((8068, 8091), 'numpy.ones', 'np.ones', (['mask.shape[:2]'], {}), '(mask.shape[:2])\n', (8075, 8091), True, 'import numpy as np\n'), ((8694, 8784), 'megengine.data.dataset.Cityscapes', 'dataset.Cityscapes', (['dataset_dir', '"""val"""'], {'mode': '"""gtFine"""', 'order': "['image', 'mask', 'info']"}), "(dataset_dir, 'val', mode='gtFine', order=['image',\n 'mask', 'info'])\n", (8712, 8784), True, 'import megengine.data.dataset as dataset\n'), ((9098, 9170), 'megengine.data.transform.Normalize', 'T.Normalize', ([], {'mean': 'cfg.IMG_MEAN', 'std': 'cfg.IMG_STD', 'order': "['image', 'mask']"}), "(mean=cfg.IMG_MEAN, std=cfg.IMG_STD, order=['image', 'mask'])\n", (9109, 9170), True, 'import megengine.data.transform as T\n'), ((4197, 4221), 'numpy.ceil', 'np.ceil', (['(i * stride_rate)'], {}), '(i * stride_rate)\n', (4204, 4221), True, 'import numpy as np\n'), ((4468, 4495), 'numpy.ceil', 'np.ceil', (['((ps - cs) / stride)'], {}), '((ps - cs) / stride)\n', (4475, 4495), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import math
import megengine as mge
import megengine.functional as F
import megengine.module as M
from yolox.utils import bboxes_iou
from .losses import binary_cross_entropy, iou_loss, l1_loss
from .network_blocks import BaseConv, DWConv
def meshgrid(x, y):
    """meshgrid wrapper for megengine.

    Given 1-D tensors ``x`` (width axis) and ``y`` (height axis), return
    two tensors of shape (len(y), len(x)): ``x`` broadcast along rows and
    ``y`` broadcast along columns.
    """
    assert len(x.shape) == 1
    assert len(y.shape) == 1
    target_shape = (y.shape[0], x.shape[0])
    grid_x = F.broadcast_to(x, target_shape)
    grid_y = F.broadcast_to(y.reshape(-1, 1), target_shape)
    return grid_x, grid_y
class YOLOXHead(M.Module):
def __init__(
self, num_classes, width=1.0, strides=[8, 16, 32, 64],
in_channels=[256, 512, 1024, 1024], act="silu", depthwise=False
):
"""
Args:
act (str): activation type of conv. Defalut value: "silu".
depthwise (bool): wheather apply depthwise conv in conv branch. Defalut value: False.
"""
super().__init__()
self.n_anchors = 1
self.num_classes = num_classes
self.decode_in_inference = True # for deploy, set to False
self.cls_convs = []
self.reg_convs = []
self.cls_preds = []
self.reg_preds = []
self.obj_preds = []
self.stems = []
Conv = DWConv if depthwise else BaseConv
for i in range(len(in_channels)):
self.stems.append(
BaseConv(
in_channels=int(in_channels[i] * width),
out_channels=int(256 * width),
ksize=1,
stride=1,
act=act,
)
)
self.cls_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.reg_convs.append(
M.Sequential(
*[
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
Conv(
in_channels=int(256 * width),
out_channels=int(256 * width),
ksize=3,
stride=1,
act=act,
),
]
)
)
self.cls_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * self.num_classes,
kernel_size=1,
stride=1,
padding=0,
)
)
self.reg_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=4,
kernel_size=1,
stride=1,
padding=0,
)
)
self.obj_preds.append(
M.Conv2d(
in_channels=int(256 * width),
out_channels=self.n_anchors * 1,
kernel_size=1,
stride=1,
padding=0,
)
)
self.use_l1 = False
self.strides = strides
self.grids = [F.zeros(1)] * len(in_channels)
self.expanded_strides = [None] * len(in_channels)
def initialize_biases(self, prior_prob):
for conv in self.cls_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
for conv in self.obj_preds:
bias_value = -math.log((1 - prior_prob) / prior_prob)
M.init.fill_(conv.bias, bias_value)
def forward(self, xin, labels=None, imgs=None):
outputs = []
origin_preds = []
x_shifts = []
y_shifts = []
expanded_strides = []
for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(
zip(self.cls_convs, self.reg_convs, self.strides, xin)
):
x = self.stems[k](x)
cls_x = x
reg_x = x
cls_feat = cls_conv(cls_x)
cls_output = self.cls_preds[k](cls_feat)
reg_feat = reg_conv(reg_x)
reg_output = self.reg_preds[k](reg_feat)
obj_output = self.obj_preds[k](reg_feat)
if self.training:
output = F.concat([reg_output, obj_output, cls_output], 1)
output, grid = self.get_output_and_grid(output, k, stride_this_level)
x_shifts.append(grid[:, :, 0])
y_shifts.append(grid[:, :, 1])
expanded_strides.append(F.full((1, grid.shape[1]), stride_this_level))
if self.use_l1:
batch_size = reg_output.shape[0]
hsize, wsize = reg_output.shape[-2:]
reg_output = reg_output.reshape(batch_size, self.n_anchors, 4, hsize, wsize)
reg_output = (
F.transpose(reg_output, (0, 1, 3, 4, 2)).reshape(batch_size, -1, 4)
)
origin_preds.append(mge.Tensor(reg_output))
else:
output = F.concat([reg_output, F.sigmoid(obj_output), F.sigmoid(cls_output)], 1)
outputs.append(output)
if self.training:
return self.get_losses(
imgs, x_shifts, y_shifts, expanded_strides,
labels, F.concat(outputs, 1), origin_preds,
)
else:
self.hw = [x.shape[-2:] for x in outputs]
# [batch, n_anchors_all, 85]
outputs = F.concat([F.flatten(x, start_axis=2) for x in outputs], axis=2)
outputs = F.transpose(outputs, (0, 2, 1))
if self.decode_in_inference:
return self.decode_outputs(outputs)
else:
return outputs
def get_output_and_grid(self, output, k, stride):
grid = self.grids[k]
batch_size = output.shape[0]
n_ch = 5 + self.num_classes
hsize, wsize = output.shape[-2:]
if grid.shape[2:4] != output.shape[2:4]:
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, 1, hsize, wsize, 2)
self.grids[k] = grid
output = output.reshape(batch_size, self.n_anchors, n_ch, hsize, wsize)
output = (
F.transpose(output, (0, 1, 3, 4, 2))
.reshape(batch_size, self.n_anchors * hsize * wsize, -1)
)
grid = grid.reshape(1, -1, 2)
output[..., :2] = (output[..., :2] + grid) * stride
output[..., 2:4] = F.exp(output[..., 2:4]) * stride
return output, grid
def decode_outputs(self, outputs):
grids = []
strides = []
for (hsize, wsize), stride in zip(self.hw, self.strides):
xv, yv = meshgrid(F.arange(hsize), F.arange(wsize))
grid = F.stack((xv, yv), 2).reshape(1, -1, 2)
grids.append(grid)
shape = grid.shape[:2]
strides.append(F.full((*shape, 1), stride))
grids = F.concat(grids, axis=1)
strides = F.concat(strides, axis=1)
outputs[..., :2] = (outputs[..., :2] + grids) * strides
outputs[..., 2:4] = F.exp(outputs[..., 2:4]) * strides
return outputs
def focal_loss_discrite(self, pred, gt):
pos_inds = F.equal(gt, 1).astype("float32")
neg_inds = F.equal(gt, 0).astype("float32")
pos_loss = F.log(pred+1e-5) * F.pow(1-pred, 2) * pos_inds * 0.75
neg_loss = F.log(1-pred+1e-5) * F.pow(pred, 2) * neg_inds * 0.25
loss = -(pos_loss + neg_loss)
return loss
def get_losses(
self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds,
):
bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4]
obj_preds = F.expand_dims(outputs[:, :, 4], axis=-1) # [batch, n_anchors_all, 1]
cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls]
# calculate targets
mixup = labels.shape[2] > 5
if mixup:
label_cut = labels[..., :5]
else:
label_cut = labels
nlabel = (label_cut.sum(axis=2) > 0).sum(axis=1) # number of objects
total_num_anchors = outputs.shape[1]
x_shifts = F.concat(x_shifts, 1) # [1, n_anchors_all]
y_shifts = F.concat(y_shifts, 1) # [1, n_anchors_all]
expanded_strides = F.concat(expanded_strides, 1)
if self.use_l1:
origin_preds = F.concat(origin_preds, 1)
cls_targets = []
reg_targets = []
l1_targets = []
obj_targets = []
fg_masks = []
num_fg = 0.0
num_gts = 0.0
for batch_idx in range(outputs.shape[0]):
num_gt = int(nlabel[batch_idx])
num_gts += num_gt
if num_gt == 0:
cls_target = F.zeros((0, self.num_classes))
reg_target = F.zeros((0, 4))
l1_target = F.zeros((0, 4))
obj_target = F.zeros((total_num_anchors, 1))
fg_mask = F.zeros(total_num_anchors).astype("bool")
else:
gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
gt_classes = labels[batch_idx, :num_gt, 0]
bboxes_preds_per_image = bbox_preds[batch_idx]
gt_matched_classes, fg_mask, pred_ious_this_matching, matched_gt_inds, num_fg_img = self.get_assignments( # noqa
batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes,
bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts,
cls_preds, bbox_preds, obj_preds, labels, imgs,
)
num_fg += num_fg_img
cls_target = F.one_hot(
gt_matched_classes.astype("int32"), self.num_classes
) * F.expand_dims(pred_ious_this_matching, axis=-1)
obj_target = F.expand_dims(fg_mask, axis=-1)
reg_target = gt_bboxes_per_image[matched_gt_inds]
if self.use_l1:
l1_target = self.get_l1_target(
F.zeros((num_fg_img, 4)),
gt_bboxes_per_image[matched_gt_inds],
expanded_strides[0][fg_mask],
x_shifts=x_shifts[0][fg_mask],
y_shifts=y_shifts[0][fg_mask],
)
cls_targets.append(cls_target)
reg_targets.append(reg_target)
obj_targets.append(obj_target)
fg_masks.append(fg_mask)
if self.use_l1:
l1_targets.append(l1_target)
cls_targets = F.concat(cls_targets, 0)
reg_targets = F.concat(reg_targets, 0)
obj_targets = F.concat(obj_targets, 0)
fg_masks = F.concat(fg_masks, 0)
num_fg = max(num_fg, 1)
loss_iou = (iou_loss(bbox_preds.reshape(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
loss_obj = (
# todo 修改为focalloss
self.focal_loss_discrite(F.sigmoid(obj_preds).reshape(-1, 1), obj_targets)
# self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets) # 原先的loss
).sum() / num_fg
# loss_obj = (binary_cross_entropy(obj_preds.reshape(-1, 1), obj_targets)).sum() / num_fg
loss_cls = (
binary_cross_entropy(cls_preds.reshape(-1, self.num_classes)[fg_masks], cls_targets)
).sum() / num_fg
if self.use_l1:
l1_targets = F.concat(l1_targets, 0)
loss_l1 = (l1_loss(origin_preds.reshape(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
else:
loss_l1 = mge.Tensor(0.0)
reg_weight = 5.0
loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
return loss, reg_weight * loss_iou, loss_obj, loss_cls, loss_l1, num_fg / max(num_gts, 1)
def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):
l1_target[:, 0] = gt[:, 0] / stride - x_shifts
l1_target[:, 1] = gt[:, 1] / stride - y_shifts
l1_target[:, 2] = F.log(gt[:, 2] / stride + eps)
l1_target[:, 3] = F.log(gt[:, 3] / stride + eps)
return l1_target
def get_assignments(
self, batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes,
bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts,
cls_preds, bbox_preds, obj_preds, labels, imgs
):
fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt,
)
bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
cls_preds_ = cls_preds[batch_idx][fg_mask]
obj_preds_ = obj_preds[batch_idx][fg_mask]
num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
pair_wise_ious = bboxes_iou(
gt_bboxes_per_image, bboxes_preds_per_image, False
)
# MGE might bring bad exper
gt_cls_per_image = (
F.repeat(
F.expand_dims(
F.one_hot(gt_classes.astype("int32"), self.num_classes).astype("float32"),
axis=1,
),
repeats=num_in_boxes_anchor, axis=1,
)
)
pair_wise_ious_loss = -F.log(pair_wise_ious + 1e-8)
# ditto
cls_preds_ = F.sigmoid(
F.repeat(F.expand_dims(cls_preds_.astype("float32"), axis=0), repeats=num_gt, axis=0)
) * F.sigmoid(F.repeat(F.expand_dims(obj_preds_, axis=0), repeats=num_gt, axis=0))
pair_wise_cls_loss = binary_cross_entropy(
F.sqrt(cls_preds_), gt_cls_per_image, with_logits=False,
).sum(-1)
del cls_preds_
cost = (
pair_wise_cls_loss
+ 3.0 * pair_wise_ious_loss
+ 100000.0 * (~is_in_boxes_and_center)
)
(
num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)
del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
return (
gt_matched_classes.detach(),
fg_mask,
pred_ious_this_matching,
matched_gt_inds,
num_fg
)
def get_in_boxes_info(
self, gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt,
):
expanded_strides_per_image = expanded_strides[0]
x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
x_centers_per_image = (
F.repeat(
F.expand_dims(x_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0),
repeats=num_gt, axis=0,
)
) # [n_anchor] -> [n_gt, n_anchor]
y_centers_per_image = F.repeat(
F.expand_dims(y_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0),
repeats=num_gt, axis=0,
)
gt_bboxes_per_image_l = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_r = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_t = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3], axis=1),
repeats=total_num_anchors, axis=1,
)
gt_bboxes_per_image_b = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3], axis=1),
repeats=total_num_anchors, axis=1,
)
b_l = x_centers_per_image - gt_bboxes_per_image_l
b_r = gt_bboxes_per_image_r - x_centers_per_image
b_t = y_centers_per_image - gt_bboxes_per_image_t
b_b = gt_bboxes_per_image_b - y_centers_per_image
bbox_deltas = F.stack([b_l, b_t, b_r, b_b], 2)
is_in_boxes = bbox_deltas.min(axis=-1) > 0.0
is_in_boxes_all = is_in_boxes.sum(axis=0) > 0
# in fixed center
center_radius = 2.5
gt_bboxes_per_image_l = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0], axis=1),
repeats=total_num_anchors, axis=1,
) - center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_r = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 0], axis=1),
repeats=total_num_anchors, axis=1,
) + center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_t = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1], axis=1),
repeats=total_num_anchors, axis=1,
) - center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
gt_bboxes_per_image_b = F.repeat(
F.expand_dims(gt_bboxes_per_image[:, 1], axis=1),
repeats=total_num_anchors, axis=1,
) + center_radius * F.expand_dims(expanded_strides_per_image, axis=0)
c_l = x_centers_per_image - gt_bboxes_per_image_l
c_r = gt_bboxes_per_image_r - x_centers_per_image
c_t = y_centers_per_image - gt_bboxes_per_image_t
c_b = gt_bboxes_per_image_b - y_centers_per_image
center_deltas = F.stack([c_l, c_t, c_r, c_b], 2)
is_in_centers = center_deltas.min(axis=-1) > 0.0
is_in_centers_all = is_in_centers.sum(axis=0) > 0
# in boxes and in centers
is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
is_in_boxes_and_center = (
is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
)
return is_in_boxes_anchor.detach(), is_in_boxes_and_center.detach()
def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):
    """Assign anchors to ground-truth boxes with a dynamic top-k rule (SimOTA).

    Each gt claims ``k`` lowest-cost candidate anchors, where ``k`` is derived
    from the sum of its top IoUs; conflicts (one anchor claimed by several gts)
    are resolved in favor of the gt with the smallest cost.

    Args:
        cost: matching-cost matrix of shape (num_gt, num_candidates).
        pair_wise_ious: IoU matrix of shape (num_gt, num_candidates).
        gt_classes: class index per ground-truth box.
        num_gt: number of ground-truth boxes in this image.
        fg_mask: boolean mask over all anchors; narrowed in place so only the
            finally matched anchors remain True.

    Returns:
        Tuple of detached tensors: (num_fg, gt_matched_classes,
        pred_ious_this_matching, matched_gt_inds).
    """
    match_mat = F.zeros_like(cost)
    # Dynamic K: each gt's budget is the (clipped) sum of its top-k IoUs.
    candidate_k = min(10, pair_wise_ious.shape[1])
    topk_ious, _ = F.topk(pair_wise_ious, candidate_k, descending=True)
    dynamic_ks = F.clip(topk_ious.sum(1).astype("int32"), lower=1)
    for gt_idx in range(num_gt):
        _, pos_idx = F.topk(cost[gt_idx], k=dynamic_ks[gt_idx], descending=False)
        match_mat[gt_idx, pos_idx] = 1.0
    del topk_ious, dynamic_ks, pos_idx
    # Resolve anchors matched to more than one gt: keep only the cheapest gt.
    anchor_matching_gt = match_mat.sum(0)
    if (anchor_matching_gt > 1).sum() > 0:
        cheapest_gt = F.argmin(cost[:, anchor_matching_gt > 1], axis=0)
        match_mat[:, anchor_matching_gt > 1] = 0.0
        match_mat[cheapest_gt, anchor_matching_gt > 1] = 1.0
    fg_mask_inboxes = match_mat.sum(0) > 0.0
    num_fg = fg_mask_inboxes.sum()
    # Narrow fg_mask in place: True entries collapse to the surviving anchors.
    fg_mask[fg_mask] = fg_mask_inboxes
    matched_gt_inds = F.argmax(match_mat[:, fg_mask_inboxes], axis=0)
    gt_matched_classes = gt_classes[matched_gt_inds]
    pred_ious_this_matching = (match_mat * pair_wise_ious).sum(0)[fg_mask_inboxes]
    return (
        num_fg.detach(),
        gt_matched_classes.detach(),
        pred_ious_this_matching.detach(),
        matched_gt_inds.detach(),
    )
|
[
"megengine.functional.equal",
"megengine.functional.pow",
"megengine.functional.transpose",
"megengine.functional.zeros",
"megengine.functional.argmax",
"megengine.functional.log",
"megengine.functional.broadcast_to",
"megengine.functional.stack",
"megengine.functional.flatten",
"megengine.functional.sqrt",
"megengine.functional.concat",
"megengine.functional.exp",
"megengine.functional.zeros_like",
"megengine.functional.argmin",
"megengine.module.init.fill_",
"megengine.functional.sigmoid",
"megengine.functional.arange",
"megengine.functional.expand_dims",
"megengine.functional.topk",
"megengine.Tensor",
"megengine.functional.full"
] |
[((523, 552), 'megengine.functional.broadcast_to', 'F.broadcast_to', (['x', 'mesh_shape'], {}), '(x, mesh_shape)\n', (537, 552), True, 'import megengine.functional as F\n'), ((8005, 8028), 'megengine.functional.concat', 'F.concat', (['grids'], {'axis': '(1)'}), '(grids, axis=1)\n', (8013, 8028), True, 'import megengine.functional as F\n'), ((8047, 8072), 'megengine.functional.concat', 'F.concat', (['strides'], {'axis': '(1)'}), '(strides, axis=1)\n', (8055, 8072), True, 'import megengine.functional as F\n'), ((8787, 8827), 'megengine.functional.expand_dims', 'F.expand_dims', (['outputs[:, :, 4]'], {'axis': '(-1)'}), '(outputs[:, :, 4], axis=-1)\n', (8800, 8827), True, 'import megengine.functional as F\n'), ((9239, 9260), 'megengine.functional.concat', 'F.concat', (['x_shifts', '(1)'], {}), '(x_shifts, 1)\n', (9247, 9260), True, 'import megengine.functional as F\n'), ((9302, 9323), 'megengine.functional.concat', 'F.concat', (['y_shifts', '(1)'], {}), '(y_shifts, 1)\n', (9310, 9323), True, 'import megengine.functional as F\n'), ((9373, 9402), 'megengine.functional.concat', 'F.concat', (['expanded_strides', '(1)'], {}), '(expanded_strides, 1)\n', (9381, 9402), True, 'import megengine.functional as F\n'), ((11669, 11693), 'megengine.functional.concat', 'F.concat', (['cls_targets', '(0)'], {}), '(cls_targets, 0)\n', (11677, 11693), True, 'import megengine.functional as F\n'), ((11716, 11740), 'megengine.functional.concat', 'F.concat', (['reg_targets', '(0)'], {}), '(reg_targets, 0)\n', (11724, 11740), True, 'import megengine.functional as F\n'), ((11763, 11787), 'megengine.functional.concat', 'F.concat', (['obj_targets', '(0)'], {}), '(obj_targets, 0)\n', (11771, 11787), True, 'import megengine.functional as F\n'), ((11807, 11828), 'megengine.functional.concat', 'F.concat', (['fg_masks', '(0)'], {}), '(fg_masks, 0)\n', (11815, 11828), True, 'import megengine.functional as F\n'), ((13081, 13111), 'megengine.functional.log', 'F.log', (['(gt[:, 2] / stride + eps)'], {}), 
'(gt[:, 2] / stride + eps)\n', (13086, 13111), True, 'import megengine.functional as F\n'), ((13138, 13168), 'megengine.functional.log', 'F.log', (['(gt[:, 3] / stride + eps)'], {}), '(gt[:, 3] / stride + eps)\n', (13143, 13168), True, 'import megengine.functional as F\n'), ((13867, 13929), 'yolox.utils.bboxes_iou', 'bboxes_iou', (['gt_bboxes_per_image', 'bboxes_preds_per_image', '(False)'], {}), '(gt_bboxes_per_image, bboxes_preds_per_image, False)\n', (13877, 13929), False, 'from yolox.utils import bboxes_iou\n'), ((17118, 17150), 'megengine.functional.stack', 'F.stack', (['[b_l, b_t, b_r, b_b]', '(2)'], {}), '([b_l, b_t, b_r, b_b], 2)\n', (17125, 17150), True, 'import megengine.functional as F\n'), ((18488, 18520), 'megengine.functional.stack', 'F.stack', (['[c_l, c_t, c_r, c_b]', '(2)'], {}), '([c_l, c_t, c_r, c_b], 2)\n', (18495, 18520), True, 'import megengine.functional as F\n'), ((19150, 19168), 'megengine.functional.zeros_like', 'F.zeros_like', (['cost'], {}), '(cost)\n', (19162, 19168), True, 'import megengine.functional as F\n'), ((19302, 19362), 'megengine.functional.topk', 'F.topk', (['ious_in_boxes_matrix', 'n_candidate_k'], {'descending': '(True)'}), '(ious_in_boxes_matrix, n_candidate_k, descending=True)\n', (19308, 19362), True, 'import megengine.functional as F\n'), ((20168, 20221), 'megengine.functional.argmax', 'F.argmax', (['matching_matrix[:, fg_mask_inboxes]'], {'axis': '(0)'}), '(matching_matrix[:, fg_mask_inboxes], axis=0)\n', (20176, 20221), True, 'import megengine.functional as F\n'), ((4374, 4409), 'megengine.module.init.fill_', 'M.init.fill_', (['conv.bias', 'bias_value'], {}), '(conv.bias, bias_value)\n', (4386, 4409), True, 'import megengine.module as M\n'), ((4525, 4560), 'megengine.module.init.fill_', 'M.init.fill_', (['conv.bias', 'bias_value'], {}), '(conv.bias, bias_value)\n', (4537, 4560), True, 'import megengine.module as M\n'), ((6593, 6624), 'megengine.functional.transpose', 'F.transpose', (['outputs', '(0, 2, 1)'], {}), 
'(outputs, (0, 2, 1))\n', (6604, 6624), True, 'import megengine.functional as F\n'), ((7537, 7560), 'megengine.functional.exp', 'F.exp', (['output[..., 2:4]'], {}), '(output[..., 2:4])\n', (7542, 7560), True, 'import megengine.functional as F\n'), ((8166, 8190), 'megengine.functional.exp', 'F.exp', (['outputs[..., 2:4]'], {}), '(outputs[..., 2:4])\n', (8171, 8190), True, 'import megengine.functional as F\n'), ((9454, 9479), 'megengine.functional.concat', 'F.concat', (['origin_preds', '(1)'], {}), '(origin_preds, 1)\n', (9462, 9479), True, 'import megengine.functional as F\n'), ((12494, 12517), 'megengine.functional.concat', 'F.concat', (['l1_targets', '(0)'], {}), '(l1_targets, 0)\n', (12502, 12517), True, 'import megengine.functional as F\n'), ((12652, 12667), 'megengine.Tensor', 'mge.Tensor', (['(0.0)'], {}), '(0.0)\n', (12662, 12667), True, 'import megengine as mge\n'), ((14321, 14350), 'megengine.functional.log', 'F.log', (['(pair_wise_ious + 1e-08)'], {}), '(pair_wise_ious + 1e-08)\n', (14326, 14350), True, 'import megengine.functional as F\n'), ((15730, 15806), 'megengine.functional.expand_dims', 'F.expand_dims', (['(x_shifts_per_image + 0.5 * expanded_strides_per_image)'], {'axis': '(0)'}), '(x_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0)\n', (15743, 15806), True, 'import megengine.functional as F\n'), ((15958, 16034), 'megengine.functional.expand_dims', 'F.expand_dims', (['(y_shifts_per_image + 0.5 * expanded_strides_per_image)'], {'axis': '(0)'}), '(y_shifts_per_image + 0.5 * expanded_strides_per_image, axis=0)\n', (15971, 16034), True, 'import megengine.functional as F\n'), ((16137, 16223), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2],\n axis=1)\n', (16150, 16223), True, 'import megengine.functional as F\n'), ((16332, 16418), 'megengine.functional.expand_dims', 'F.expand_dims', 
(['(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2],\n axis=1)\n', (16345, 16418), True, 'import megengine.functional as F\n'), ((16527, 16613), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3],\n axis=1)\n', (16540, 16613), True, 'import megengine.functional as F\n'), ((16722, 16808), 'megengine.functional.expand_dims', 'F.expand_dims', (['(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3],\n axis=1)\n', (16735, 16808), True, 'import megengine.functional as F\n'), ((19496, 19556), 'megengine.functional.topk', 'F.topk', (['cost[gt_idx]'], {'k': 'dynamic_ks[gt_idx]', 'descending': '(False)'}), '(cost[gt_idx], k=dynamic_ks[gt_idx], descending=False)\n', (19502, 19556), True, 'import megengine.functional as F\n'), ((19778, 19827), 'megengine.functional.argmin', 'F.argmin', (['cost[:, anchor_matching_gt > 1]'], {'axis': '(0)'}), '(cost[:, anchor_matching_gt > 1], axis=0)\n', (19786, 19827), True, 'import megengine.functional as F\n'), ((4125, 4135), 'megengine.functional.zeros', 'F.zeros', (['(1)'], {}), '(1)\n', (4132, 4135), True, 'import megengine.functional as F\n'), ((4322, 4361), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (4330, 4361), False, 'import math\n'), ((4473, 4512), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (4481, 4512), False, 'import math\n'), ((5258, 5307), 'megengine.functional.concat', 'F.concat', (['[reg_output, obj_output, cls_output]', '(1)'], {}), '([reg_output, obj_output, cls_output], 1)\n', (5266, 5307), True, 'import megengine.functional as F\n'), ((6326, 6346), 
'megengine.functional.concat', 'F.concat', (['outputs', '(1)'], {}), '(outputs, 1)\n', (6334, 6346), True, 'import megengine.functional as F\n'), ((7046, 7061), 'megengine.functional.arange', 'F.arange', (['hsize'], {}), '(hsize)\n', (7054, 7061), True, 'import megengine.functional as F\n'), ((7063, 7078), 'megengine.functional.arange', 'F.arange', (['wsize'], {}), '(wsize)\n', (7071, 7078), True, 'import megengine.functional as F\n'), ((7296, 7332), 'megengine.functional.transpose', 'F.transpose', (['output', '(0, 1, 3, 4, 2)'], {}), '(output, (0, 1, 3, 4, 2))\n', (7307, 7332), True, 'import megengine.functional as F\n'), ((7774, 7789), 'megengine.functional.arange', 'F.arange', (['hsize'], {}), '(hsize)\n', (7782, 7789), True, 'import megengine.functional as F\n'), ((7791, 7806), 'megengine.functional.arange', 'F.arange', (['wsize'], {}), '(wsize)\n', (7799, 7806), True, 'import megengine.functional as F\n'), ((7959, 7986), 'megengine.functional.full', 'F.full', (['(*shape, 1)', 'stride'], {}), '((*shape, 1), stride)\n', (7965, 7986), True, 'import megengine.functional as F\n'), ((8293, 8307), 'megengine.functional.equal', 'F.equal', (['gt', '(1)'], {}), '(gt, 1)\n', (8300, 8307), True, 'import megengine.functional as F\n'), ((8345, 8359), 'megengine.functional.equal', 'F.equal', (['gt', '(0)'], {}), '(gt, 0)\n', (8352, 8359), True, 'import megengine.functional as F\n'), ((9828, 9858), 'megengine.functional.zeros', 'F.zeros', (['(0, self.num_classes)'], {}), '((0, self.num_classes))\n', (9835, 9858), True, 'import megengine.functional as F\n'), ((9888, 9903), 'megengine.functional.zeros', 'F.zeros', (['(0, 4)'], {}), '((0, 4))\n', (9895, 9903), True, 'import megengine.functional as F\n'), ((9932, 9947), 'megengine.functional.zeros', 'F.zeros', (['(0, 4)'], {}), '((0, 4))\n', (9939, 9947), True, 'import megengine.functional as F\n'), ((9977, 10008), 'megengine.functional.zeros', 'F.zeros', (['(total_num_anchors, 1)'], {}), '((total_num_anchors, 1))\n', (9984, 
10008), True, 'import megengine.functional as F\n'), ((10926, 10957), 'megengine.functional.expand_dims', 'F.expand_dims', (['fg_mask'], {'axis': '(-1)'}), '(fg_mask, axis=-1)\n', (10939, 10957), True, 'import megengine.functional as F\n'), ((17369, 17417), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 0]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0], axis=1)\n', (17382, 17417), True, 'import megengine.functional as F\n'), ((17494, 17543), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17507, 17543), True, 'import megengine.functional as F\n'), ((17598, 17646), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 0]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 0], axis=1)\n', (17611, 17646), True, 'import megengine.functional as F\n'), ((17723, 17772), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17736, 17772), True, 'import megengine.functional as F\n'), ((17827, 17875), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 1]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1], axis=1)\n', (17840, 17875), True, 'import megengine.functional as F\n'), ((17952, 18001), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (17965, 18001), True, 'import megengine.functional as F\n'), ((18056, 18104), 'megengine.functional.expand_dims', 'F.expand_dims', (['gt_bboxes_per_image[:, 1]'], {'axis': '(1)'}), '(gt_bboxes_per_image[:, 1], axis=1)\n', (18069, 18104), True, 'import megengine.functional as F\n'), ((18181, 18230), 'megengine.functional.expand_dims', 'F.expand_dims', (['expanded_strides_per_image'], {'axis': '(0)'}), '(expanded_strides_per_image, axis=0)\n', (18194, 18230), True, 'import 
megengine.functional as F\n'), ((5528, 5573), 'megengine.functional.full', 'F.full', (['(1, grid.shape[1])', 'stride_this_level'], {}), '((1, grid.shape[1]), stride_this_level)\n', (5534, 5573), True, 'import megengine.functional as F\n'), ((6517, 6543), 'megengine.functional.flatten', 'F.flatten', (['x'], {'start_axis': '(2)'}), '(x, start_axis=2)\n', (6526, 6543), True, 'import megengine.functional as F\n'), ((7099, 7119), 'megengine.functional.stack', 'F.stack', (['(xv, yv)', '(2)'], {}), '((xv, yv), 2)\n', (7106, 7119), True, 'import megengine.functional as F\n'), ((7827, 7847), 'megengine.functional.stack', 'F.stack', (['(xv, yv)', '(2)'], {}), '((xv, yv), 2)\n', (7834, 7847), True, 'import megengine.functional as F\n'), ((8397, 8416), 'megengine.functional.log', 'F.log', (['(pred + 1e-05)'], {}), '(pred + 1e-05)\n', (8402, 8416), True, 'import megengine.functional as F\n'), ((8416, 8434), 'megengine.functional.pow', 'F.pow', (['(1 - pred)', '(2)'], {}), '(1 - pred, 2)\n', (8421, 8434), True, 'import megengine.functional as F\n'), ((8470, 8493), 'megengine.functional.log', 'F.log', (['(1 - pred + 1e-05)'], {}), '(1 - pred + 1e-05)\n', (8475, 8493), True, 'import megengine.functional as F\n'), ((8491, 8505), 'megengine.functional.pow', 'F.pow', (['pred', '(2)'], {}), '(pred, 2)\n', (8496, 8505), True, 'import megengine.functional as F\n'), ((10849, 10896), 'megengine.functional.expand_dims', 'F.expand_dims', (['pred_ious_this_matching'], {'axis': '(-1)'}), '(pred_ious_this_matching, axis=-1)\n', (10862, 10896), True, 'import megengine.functional as F\n'), ((14528, 14561), 'megengine.functional.expand_dims', 'F.expand_dims', (['obj_preds_'], {'axis': '(0)'}), '(obj_preds_, axis=0)\n', (14541, 14561), True, 'import megengine.functional as F\n'), ((14652, 14670), 'megengine.functional.sqrt', 'F.sqrt', (['cls_preds_'], {}), '(cls_preds_)\n', (14658, 14670), True, 'import megengine.functional as F\n'), ((6003, 6025), 'megengine.Tensor', 'mge.Tensor', 
(['reg_output'], {}), '(reg_output)\n', (6013, 6025), True, 'import megengine as mge\n'), ((6093, 6114), 'megengine.functional.sigmoid', 'F.sigmoid', (['obj_output'], {}), '(obj_output)\n', (6102, 6114), True, 'import megengine.functional as F\n'), ((6116, 6137), 'megengine.functional.sigmoid', 'F.sigmoid', (['cls_output'], {}), '(cls_output)\n', (6125, 6137), True, 'import megengine.functional as F\n'), ((10035, 10061), 'megengine.functional.zeros', 'F.zeros', (['total_num_anchors'], {}), '(total_num_anchors)\n', (10042, 10061), True, 'import megengine.functional as F\n'), ((11132, 11156), 'megengine.functional.zeros', 'F.zeros', (['(num_fg_img, 4)'], {}), '((num_fg_img, 4))\n', (11139, 11156), True, 'import megengine.functional as F\n'), ((5873, 5913), 'megengine.functional.transpose', 'F.transpose', (['reg_output', '(0, 1, 3, 4, 2)'], {}), '(reg_output, (0, 1, 3, 4, 2))\n', (5884, 5913), True, 'import megengine.functional as F\n'), ((12047, 12067), 'megengine.functional.sigmoid', 'F.sigmoid', (['obj_preds'], {}), '(obj_preds)\n', (12056, 12067), True, 'import megengine.functional as F\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.